Search results for: Graph Neural Network (GNN)

class="nav-item"> <a class="nav-link" href="https://waset.org/disciplines" title="Disciplines">Disciplines</a> </li> <li class="nav-item"> <a class="nav-link" href="https://waset.org/committees" rel="nofollow">Committees</a> </li> <li class="nav-item dropdown"> <a class="nav-link dropdown-toggle" href="#" id="navbarDropdownPublications" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false"> Publications </a> <div class="dropdown-menu" aria-labelledby="navbarDropdownPublications"> <a class="dropdown-item" href="https://publications.waset.org/abstracts">Abstracts</a> <a class="dropdown-item" href="https://publications.waset.org">Periodicals</a> <a class="dropdown-item" href="https://publications.waset.org/archive">Archive</a> </div> </li> <li class="nav-item"> <a class="nav-link" href="https://waset.org/page/support" title="Support">Support</a> </li> </ul> </div> </div> </nav> </div> </header> <main> <div class="container mt-4"> <div class="row"> <div class="col-md-9 mx-auto"> <form method="get" action="https://publications.waset.org/abstracts/search"> <div id="custom-search-input"> <div class="input-group"> <i class="fas fa-search"></i> <input type="text" class="search-query" name="q" placeholder="Author, Title, Abstract, Keywords" value="Graph Neural Network (GNN)"> <input type="submit" class="btn_search" value="Search"> </div> </div> </form> </div> </div> <div class="row mt-3"> <div class="col-sm-3"> <div class="card"> <div class="card-body"><strong>Commenced</strong> in January 2007</div> </div> </div> <div class="col-sm-3"> <div class="card"> <div class="card-body"><strong>Frequency:</strong> Monthly</div> </div> </div> <div class="col-sm-3"> <div class="card"> <div class="card-body"><strong>Edition:</strong> International</div> </div> </div> <div class="col-sm-3"> <div class="card"> <div class="card-body"><strong>Paper Count:</strong> 5697</div> </div> </div> </div> <h1 class="mt-3 mb-3 text-center" style="font-size:1.6rem;">Search results for: Graph Neural Network (GNN)</h1> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">5697</span> Multi-Stream Graph Attention Network for Recommendation with Knowledge Graph</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Zhifei%20Hu">Zhifei Hu</a>, <a href="https://publications.waset.org/abstracts/search?q=Feng%20Xia"> Feng Xia</a> </p> <p class="card-text"><strong>Abstract:</strong></p> In recent years, Graph neural network has been widely used in knowledge graph recommendation. The existing recommendation methods based on graph neural network extract information from knowledge graph through entity and relation, which may not be efficient in the way of information extraction. In order to better propose useful entity information for the current recommendation task in the knowledge graph, we propose an end-to-end Neural network Model based on multi-stream graph attentional Mechanism (MSGAT), which can effectively integrate the knowledge graph into the recommendation system by evaluating the importance of entities from both users and items. Specifically, we use the attention mechanism from the user's perspective to distil the domain nodes information of the predicted item in the knowledge graph, to enhance the user's information on items, and generate the feature representation of the predicted item. 
5696. Enhanced Retrieval-Augmented Generation (RAG) Method with Knowledge Graph and Graph Neural Network (GNN) for Automated QA Systems
Authors: Zhihao Zheng, Zhilin Wang, Linxin Liu
Abstract: In research on automated knowledge question-answering systems, accuracy and efficiency are critical challenges. This paper proposes a knowledge graph-enhanced Retrieval-Augmented Generation (RAG) method, combined with a Graph Neural Network (GNN) structure, to automatically determine the correctness of knowledge competition questions. First, a domain-specific knowledge graph was constructed from a large corpus of academic journal literature, with key entities and relationships extracted using Natural Language Processing (NLP) techniques. Then, the RAG method's retrieval module was expanded to query both text databases and the knowledge graph simultaneously, leveraging the GNN to further extract structured information from the knowledge graph. During answer generation, contextual information provided by the knowledge graph and GNN is incorporated to improve the accuracy and consistency of the answers. Experimental results demonstrate that the knowledge graph and GNN-enhanced RAG method performs excellently in determining the correctness of questions, achieving an accuracy rate of 95%. Particularly in cases involving ambiguity or requiring contextual information, the structured knowledge provided by the knowledge graph and GNN significantly enhances the RAG method's performance. This approach not only demonstrates significant advantages in improving the accuracy and efficiency of automated knowledge question-answering systems but also offers new directions and ideas for future research and practical applications.
Keywords: knowledge graph, graph neural network, retrieval-augmented generation, NLP
PDF: https://publications.waset.org/abstracts/188751.pdf (Downloads: 52)
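A minimal sketch of the kind of hybrid retrieval this abstract describes: query a text index and a knowledge graph in parallel and merge the results into the generation context. The toy documents, triples, and overlap scoring are placeholder assumptions, not the authors' system or any specific retrieval library.

```python
documents = {
    "d1": "Graph neural networks aggregate information from node neighborhoods.",
    "d2": "Retrieval-augmented generation conditions a language model on retrieved text.",
}
kg_triples = [
    ("GNN", "operates_on", "graph"),
    ("RAG", "combines", "retrieval and generation"),
]

def retrieve_text(question, k=1):
    # naive word-overlap score as a stand-in for a real dense/BM25 retriever
    q = set(question.lower().split())
    scored = sorted(documents.items(),
                    key=lambda kv: -len(q & set(kv[1].lower().split())))
    return [text for _, text in scored[:k]]

def retrieve_kg(question):
    # return triples whose head entity is mentioned in the question
    return [t for t in kg_triples if t[0].lower() in question.lower()]

def build_context(question):
    passages = retrieve_text(question)
    facts = [f"{h} {r} {t}" for h, r, t in retrieve_kg(question)]
    return "\n".join(passages + facts)   # fed to the generator as context

print(build_context("Is a GNN suitable for graph data?"))
```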
5695. Explainable Graph Attention Networks
Authors: David Pham, Yongfeng Zhang
Abstract: Graphs are an important structure for data storage and computation. Recent years have seen the success of deep learning on graphs, such as Graph Neural Networks (GNN), on various data mining and machine learning tasks. However, most of the deep learning models on graphs cannot easily explain their predictions and are thus often labelled as "black boxes." For example, the Graph Attention Network (GAT) is a frequently used GNN architecture, which adopts an attention mechanism to carefully select the neighborhood nodes for message passing and aggregation. However, it is difficult to explain why certain neighbors are selected while others are not and how the selected neighbors contribute to the final classification result. In this paper, we present a graph learning model called Explainable Graph Attention Network (XGAT), which integrates graph attention modeling and explainability. We use a single model to target both the accuracy and explainability of problem spaces and show that, in the context of graph attention modeling, we can design a unified neighborhood selection strategy that selects appropriate neighbor nodes for both better accuracy and enhanced explainability. To justify this, we conduct extensive experiments to better understand the behavior of our model under different conditions and show an increase in both accuracy and explainability.
Keywords: explainable AI, graph attention network, graph neural network, node classification
PDF: https://publications.waset.org/abstracts/156796.pdf (Downloads: 210)
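To make the notion of attention-as-explanation concrete, here is a single attention head over one node's neighborhood that exposes the attention coefficients such explanations typically inspect. The shapes, LeakyReLU slope, and scoring form follow the standard GAT formulation in spirit; they are illustrative assumptions, not the XGAT model.

```python
import numpy as np

def leaky_relu(x, slope=0.2):
    return np.where(x > 0, x, slope * x)

def gat_head(h_center, h_neighbors, W, a):
    """h_center: (F,), h_neighbors: (N, F), W: (F, F'), a: (2*F',)."""
    z_c = h_center @ W
    z_n = h_neighbors @ W
    scores = leaky_relu(np.concatenate(
        [np.tile(z_c, (len(z_n), 1)), z_n], axis=1) @ a)
    alpha = np.exp(scores - scores.max())
    alpha /= alpha.sum()                       # attention over the neighborhood
    h_new = np.tanh((alpha[:, None] * z_n).sum(axis=0))
    return h_new, alpha                        # alpha doubles as an explanation signal

rng = np.random.default_rng(1)
h_new, alpha = gat_head(rng.normal(size=4), rng.normal(size=(3, 4)),
                        rng.normal(size=(4, 4)), rng.normal(size=8))
print("attention over neighbors:", alpha.round(3))
```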
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=explainable%20AI" title="explainable AI">explainable AI</a>, <a href="https://publications.waset.org/abstracts/search?q=graph%20attention%20network" title=" graph attention network"> graph attention network</a>, <a href="https://publications.waset.org/abstracts/search?q=graph%20neural%20network" title=" graph neural network"> graph neural network</a>, <a href="https://publications.waset.org/abstracts/search?q=node%20classification" title=" node classification"> node classification</a> </p> <a href="https://publications.waset.org/abstracts/156796/explainable-graph-attention-networks" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/156796.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">210</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">5694</span> Autism Spectrum Disorder Classification Algorithm Using Multimodal Data Based on Graph Convolutional Network</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Yuntao%20Liu">Yuntao Liu</a>, <a href="https://publications.waset.org/abstracts/search?q=Lei%20Wang"> Lei Wang</a>, <a href="https://publications.waset.org/abstracts/search?q=Haoran%20Xia"> Haoran Xia</a> </p> <p class="card-text"><strong>Abstract:</strong></p> Machine learning has shown extensive applications in the development of classification models for autism spectrum disorder (ASD) using neural image data. This paper proposes a fusion multi-modal classification network based on a graph neural network. First, the brain is segmented into 116 regions of interest using a medical segmentation template (AAL, Anatomical Automatic Labeling). The image features of sMRI and the signal features of fMRI are extracted, which build the node and edge embedding representations of the brain map. Then, we construct a dynamically updated brain map neural network and propose a method based on a dynamic brain map adjacency matrix update mechanism and learnable graph to further improve the accuracy of autism diagnosis and recognition results. Based on the Autism Brain Imaging Data Exchange I dataset(ABIDE I), we reached a prediction accuracy of 74% between ASD and TD subjects. Besides, to study the biomarkers that can help doctors analyze diseases and interpretability, we used the features by extracting the top five maximum and minimum ROI weights. This work provides a meaningful way for brain disorder identification. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=autism%20spectrum%20disorder" title="autism spectrum disorder">autism spectrum disorder</a>, <a href="https://publications.waset.org/abstracts/search?q=brain%20map" title=" brain map"> brain map</a>, <a href="https://publications.waset.org/abstracts/search?q=supervised%20machine%20learning" title=" supervised machine learning"> supervised machine learning</a>, <a href="https://publications.waset.org/abstracts/search?q=graph%20network" title=" graph network"> graph network</a>, <a href="https://publications.waset.org/abstracts/search?q=multimodal%20data" title=" multimodal data"> multimodal data</a>, <a href="https://publications.waset.org/abstracts/search?q=model%20interpretability" title=" model interpretability"> model interpretability</a> </p> <a href="https://publications.waset.org/abstracts/183319/autism-spectrum-disorder-classification-algorithm-using-multimodal-data-based-on-graph-convolutional-network" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/183319.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">91</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">5693</span> A New Graph Theoretic Problem with Ample Practical Applications</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Mehmet%20Hakan%20Karaata">Mehmet Hakan Karaata</a> </p> <p class="card-text"><strong>Abstract:</strong></p> In this paper, we first coin a new graph theocratic problem with numerous applications. Second, we provide two algorithms for the problem. The first solution is using a brute-force techniques, whereas the second solution is based on an initial identification of the cycles in the given graph. We then provide a correctness proof of the algorithm. The applications of the problem include graph analysis, graph drawing and network structuring. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=algorithm" title="algorithm">algorithm</a>, <a href="https://publications.waset.org/abstracts/search?q=cycle" title=" cycle"> cycle</a>, <a href="https://publications.waset.org/abstracts/search?q=graph%20algorithm" title=" graph algorithm"> graph algorithm</a>, <a href="https://publications.waset.org/abstracts/search?q=graph%20theory" title=" graph theory"> graph theory</a>, <a href="https://publications.waset.org/abstracts/search?q=network%20structuring" title=" network structuring"> network structuring</a> </p> <a href="https://publications.waset.org/abstracts/67285/a-new-graph-theoretic-problem-with-ample-practical-applications" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/67285.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">396</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">5692</span> Enhancing Knowledge Graph Convolutional Networks with Structural Adaptive Receptive Fields for Improved Node Representation and Information Aggregation</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Zheng%20Zhihao">Zheng Zhihao</a> </p> <p class="card-text"><strong>Abstract:</strong></p> Recently, Knowledge Graph Framework Network (KGCN) has developed powerful capabilities in knowledge representation and reasoning tasks. However, traditional KGCN often uses a fixed weight mechanism when aggregating information, failing to make full use of rich structural information, resulting in a certain expression ability of node representation, and easily causing over-smoothing problems. In order to solve these challenges, the paper proposes an new graph neural network model called KGCN-STAR (Knowledge Graph Convolutional Network with Structural Adaptive Receptive Fields). This model dynamically adjusts the perception of each node by introducing a structural adaptive receptive field. wild range, and a subgraph aggregator is designed to capture local structural information more effectively. Experimental results show that KGCN-STAR shows significant performance improvement on multiple knowledge graph data sets, especially showing considerable capabilities in the task of representation learning of complex structures. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=knowledge%20graph" title="knowledge graph">knowledge graph</a>, <a href="https://publications.waset.org/abstracts/search?q=graph%20neural%20networks" title=" graph neural networks"> graph neural networks</a>, <a href="https://publications.waset.org/abstracts/search?q=structural%20adaptive%20receptive%20fields" title=" structural adaptive receptive fields"> structural adaptive receptive fields</a>, <a href="https://publications.waset.org/abstracts/search?q=information%20aggregation" title=" information aggregation"> information aggregation</a> </p> <a href="https://publications.waset.org/abstracts/191048/enhancing-knowledge-graph-convolutional-networks-with-structural-adaptive-receptive-fields-for-improved-node-representation-and-information-aggregation" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/191048.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">42</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">5691</span> Message Passing Neural Network (MPNN) Approach to Multiphase Diffusion in Reservoirs for Well Interconnection Assessments</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Margarita%20Mayoral-Villa">Margarita Mayoral-Villa</a>, <a href="https://publications.waset.org/abstracts/search?q=J.%20Klapp"> J. Klapp</a>, <a href="https://publications.waset.org/abstracts/search?q=L.%20Di%20G.%20Sigalotti"> L. Di G. Sigalotti</a>, <a href="https://publications.waset.org/abstracts/search?q=J.%20E.%20V.%20Guzm%C3%A1n"> J. E. V. Guzmán</a> </p> <p class="card-text"><strong>Abstract:</strong></p> Automated learning techniques are widely applied in the energy sector to address challenging problems from a practical point of view. To this end, we discuss the implementation of a Message Passing algorithm (MPNN)within a Graph Neural Network(GNN)to leverage the neighborhood of a set of nodes during the aggregation process. This approach enables the characterization of multiphase diffusion processes in the reservoir, such that the flow paths underlying the interconnections between multiple wells may be inferred from previously available data on flow rates and bottomhole pressures. The results thus obtained compare favorably with the predictions produced by the Reduced Order Capacitance-Resistance Models (CRM) and suggest the potential of MPNNs to enhance the robustness of the forecasts while improving the computational efficiency. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=multiphase%20diffusion" title="multiphase diffusion">multiphase diffusion</a>, <a href="https://publications.waset.org/abstracts/search?q=message%20passing%20neural%20network" title=" message passing neural network"> message passing neural network</a>, <a href="https://publications.waset.org/abstracts/search?q=well%20interconnection" title=" well interconnection"> well interconnection</a>, <a href="https://publications.waset.org/abstracts/search?q=interwell%20connectivity" title=" interwell connectivity"> interwell connectivity</a>, <a href="https://publications.waset.org/abstracts/search?q=graph%20neural%20network" title=" graph neural network"> graph neural network</a>, <a href="https://publications.waset.org/abstracts/search?q=capacitance-resistance%20models" title=" capacitance-resistance models"> capacitance-resistance models</a> </p> <a href="https://publications.waset.org/abstracts/146840/message-passing-neural-network-mpnn-approach-to-multiphase-diffusion-in-reservoirs-for-well-interconnection-assessments" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/146840.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">156</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">5690</span> Scene Classification Using Hierarchy Neural Network, Directed Acyclic Graph Structure, and Label Relations</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Po-Jen%20Chen">Po-Jen Chen</a>, <a href="https://publications.waset.org/abstracts/search?q=Jian-Jiun%20Ding"> Jian-Jiun Ding</a>, <a href="https://publications.waset.org/abstracts/search?q=Hung-Wei%20Hsu"> Hung-Wei Hsu</a>, <a href="https://publications.waset.org/abstracts/search?q=Chien-Yao%20Wang"> Chien-Yao Wang</a>, <a href="https://publications.waset.org/abstracts/search?q=Jia-Ching%20Wang"> Jia-Ching Wang</a> </p> <p class="card-text"><strong>Abstract:</strong></p> A more accurate scene classification algorithm using label relations and the hierarchy neural network was developed in this work. In many classification algorithms, it is assumed that the labels are mutually exclusive. This assumption is true in some specific problems, however, for scene classification, the assumption is not reasonable. Because there are a variety of objects with a photo image, it is more practical to assign multiple labels for an image. In this paper, two label relations, which are exclusive relation and hierarchical relation, were adopted in the classification process to achieve more accurate multiple label classification results. Moreover, the hierarchy neural network (hierarchy NN) is applied to classify the image and the directed acyclic graph structure is used for predicting a more reasonable result which obey exclusive and hierarchical relations. Simulations show that, with these techniques, a much more accurate scene classification result can be achieved. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=convolutional%20neural%20network" title="convolutional neural network">convolutional neural network</a>, <a href="https://publications.waset.org/abstracts/search?q=label%20relation" title=" label relation"> label relation</a>, <a href="https://publications.waset.org/abstracts/search?q=hierarchy%20neural%20network" title=" hierarchy neural network"> hierarchy neural network</a>, <a href="https://publications.waset.org/abstracts/search?q=scene%20classification" title=" scene classification"> scene classification</a> </p> <a href="https://publications.waset.org/abstracts/66516/scene-classification-using-hierarchy-neural-network-directed-acyclic-graph-structure-and-label-relations" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/66516.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">466</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">5689</span> Metric Dimension on Line Graph of Honeycomb Networks</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=M.%20Hussain">M. Hussain</a>, <a href="https://publications.waset.org/abstracts/search?q=Aqsa%20Farooq"> Aqsa Farooq</a> </p> <p class="card-text"><strong>Abstract:</strong></p> Let G = (V,E) be a connected graph and distance between any two vertices a and b in G is a&minus;b geodesic and is denoted by d(a, b). A set of vertices W resolves a graph G if each vertex is uniquely determined by its vector of distances to the vertices in W. A metric dimension of G is the minimum cardinality of a resolving set of G. In this paper line graph of honeycomb network has been derived and then we calculated the metric dimension on line graph of honeycomb network. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=Resolving%20set" title="Resolving set">Resolving set</a>, <a href="https://publications.waset.org/abstracts/search?q=Metric%20dimension" title=" Metric dimension"> Metric dimension</a>, <a href="https://publications.waset.org/abstracts/search?q=Honeycomb%20network" title=" Honeycomb network"> Honeycomb network</a>, <a href="https://publications.waset.org/abstracts/search?q=Line%20graph" title=" Line graph"> Line graph</a> </p> <a href="https://publications.waset.org/abstracts/101558/metric-dimension-on-line-graph-of-honeycomb-networks" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/101558.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">214</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">5688</span> GRCNN: Graph Recognition Convolutional Neural Network for Synthesizing Programs from Flow Charts</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Lin%20Cheng">Lin Cheng</a>, <a href="https://publications.waset.org/abstracts/search?q=Zijiang%20Yang"> Zijiang Yang</a> </p> <p class="card-text"><strong>Abstract:</strong></p> Program synthesis is the task to automatically generate programs based on user specification. In this paper, we present a framework that synthesizes programs from flow charts that serve as accurate and intuitive specification. In order doing so, we propose a deep neural network called GRCNN that recognizes graph structure from its image. GRCNN is trained end-to-end, which can predict edge and node information of the flow chart simultaneously. Experiments show that the accuracy rate to synthesize a program is 66.4%, and the accuracy rates to recognize edge and node are 94.1% and 67.9%, respectively. On average, it takes about 60 milliseconds to synthesize a program. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=program%20synthesis" title="program synthesis">program synthesis</a>, <a href="https://publications.waset.org/abstracts/search?q=flow%20chart" title=" flow chart"> flow chart</a>, <a href="https://publications.waset.org/abstracts/search?q=specification" title=" specification"> specification</a>, <a href="https://publications.waset.org/abstracts/search?q=graph%20recognition" title=" graph recognition"> graph recognition</a>, <a href="https://publications.waset.org/abstracts/search?q=CNN" title=" CNN"> CNN</a> </p> <a href="https://publications.waset.org/abstracts/124641/grcnn-graph-recognition-convolutional-neural-network-for-synthesizing-programs-from-flow-charts" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/124641.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">124</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">5687</span> Graph Neural Network-Based Classification for Disease Prediction in Health Care Heterogeneous Data Structures of Electronic Health Record</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Raghavi%20C.%20Janaswamy">Raghavi C. Janaswamy</a> </p> <p class="card-text"><strong>Abstract:</strong></p> In the healthcare sector, heterogenous data elements such as patients, diagnosis, symptoms, conditions, observation text from physician notes, and prescriptions form the essentials of the Electronic Health Record (EHR). The data in the form of clear text and images are stored or processed in a relational format in most systems. However, the intrinsic structure restrictions and complex joins of relational databases limit the widespread utility. In this regard, the design and development of realistic mapping and deep connections as real-time objects offer unparallel advantages. Herein, a graph neural network-based classification of EHR data has been developed. The patient conditions have been predicted as a node classification task using a graph-based open source EHR data, Synthea Database, stored in Tigergraph. The Synthea DB dataset is leveraged due to its closer representation of the real-time data and being voluminous. The graph model is built from the EHR heterogeneous data using python modules, namely, pyTigerGraph to get nodes and edges from the Tigergraph database, PyTorch to tensorize the nodes and edges, PyTorch-Geometric (PyG) to train the Graph Neural Network (GNN) and adopt the self-supervised learning techniques with the AutoEncoders to generate the node embeddings and eventually perform the node classifications using the node embeddings. The model predicts patient conditions ranging from common to rare situations. The outcome is deemed to open up opportunities for data querying toward better predictions and accuracy. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=electronic%20health%20record" title="electronic health record">electronic health record</a>, <a href="https://publications.waset.org/abstracts/search?q=graph%20neural%20network" title=" graph neural network"> graph neural network</a>, <a href="https://publications.waset.org/abstracts/search?q=heterogeneous%20data" title=" heterogeneous data"> heterogeneous data</a>, <a href="https://publications.waset.org/abstracts/search?q=prediction" title=" prediction"> prediction</a> </p> <a href="https://publications.waset.org/abstracts/157840/graph-neural-network-based-classification-for-disease-prediction-in-health-care-heterogeneous-data-structures-of-electronic-health-record" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/157840.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">93</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">5686</span> LTAnalyzer: Graph-Based Learning to Detect License Terms for Open Source Software</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Guangjie%20Li">Guangjie Li</a>, <a href="https://publications.waset.org/abstracts/search?q=Shengjie%20Hou"> Shengjie Hou</a>, <a href="https://publications.waset.org/abstracts/search?q=Yi%20Tang"> Yi Tang</a>, <a href="https://publications.waset.org/abstracts/search?q=Zhihua%20Zhang"> Zhihua Zhang</a>, <a href="https://publications.waset.org/abstracts/search?q=Yan%20He"> Yan He</a> </p> <p class="card-text"><strong>Abstract:</strong></p> Open-source software (OSS) licenses specify the conditions to use, modify and redistribute software. They define key license terms, such as copyright, patent rights, and distribution rules, to dictate the rules of using the software. However, it is impractical for developers to manually identify license terms for the complexity and length of licenses, especially when custom or modified licenses are involved in OSS. To bridge this gap, this paper proposes LTAnalyzer to perform license term identification, which automatically identifies the sentences related to license terms in OSS. LTAnalyzer is a learning-based approach that uses a graph network to capture the structure and content information of sentences related to a license term. Based on this network, a graph-based neural network is trained to differentiate license terms and select relevant sentences. Evaluations demonstrate that LTAnalyzer achieves high accuracy, with precision, recall, and Fscore values of 0.950, 0.832, and 0.891, respectively, outperforming state-of-the-art methods by up to 12.8%. For 23 common license terms, LTAnalyzer consistently achieves over 80% precision and recall. This advancement enhances the efficiency and accuracy of license term detection, supporting the growing open-source ecosystem. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=license%20term" title="license term">license term</a>, <a href="https://publications.waset.org/abstracts/search?q=software%20license" title=" software license"> software license</a>, <a href="https://publications.waset.org/abstracts/search?q=open%20source%20software" title=" open source software"> open source software</a>, <a href="https://publications.waset.org/abstracts/search?q=graph%20neural%20network" title=" graph neural network"> graph neural network</a> </p> <a href="https://publications.waset.org/abstracts/199194/ltanalyzer-graph-based-learning-to-detect-license-terms-for-open-source-software" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/199194.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">1</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">5685</span> Survey Paper on Graph Coloring Problem and Its Application</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Prateek%20Chharia">Prateek Chharia</a>, <a href="https://publications.waset.org/abstracts/search?q=Biswa%20Bhusan%20Ghosh"> Biswa Bhusan Ghosh</a> </p> <p class="card-text"><strong>Abstract:</strong></p> Graph coloring is one of the prominent concepts in graph coloring. It can be defined as a coloring of the various regions of the graph such that all the constraints are fulfilled. In this paper various graphs coloring approaches like greedy coloring, Heuristic search for maximum independent set and graph coloring using edge table is described. Graph coloring can be used in various real time applications like student time tabling generation, Sudoku as a graph coloring problem, GSM phone network. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=graph%20coloring" title="graph coloring">graph coloring</a>, <a href="https://publications.waset.org/abstracts/search?q=greedy%20coloring" title=" greedy coloring"> greedy coloring</a>, <a href="https://publications.waset.org/abstracts/search?q=heuristic%20search" title=" heuristic search"> heuristic search</a>, <a href="https://publications.waset.org/abstracts/search?q=edge%20table" title=" edge table"> edge table</a>, <a href="https://publications.waset.org/abstracts/search?q=sudoku%20as%20a%20graph%20coloring%20problem" title=" sudoku as a graph coloring problem"> sudoku as a graph coloring problem</a> </p> <a href="https://publications.waset.org/abstracts/19691/survey-paper-on-graph-coloring-problem-and-its-application" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/19691.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">549</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">5684</span> Neural Graph Matching for Modification Similarity Applied to Electronic Document Comparison</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Po-Fang%20Hsu">Po-Fang Hsu</a>, <a href="https://publications.waset.org/abstracts/search?q=Chiching%20Wei"> Chiching Wei</a> </p> <p class="card-text"><strong>Abstract:</strong></p> In this paper, we present a novel neural graph matching approach applied to document comparison. Document comparison is a common task in the legal and financial industries. In some cases, the most important differences may be the addition or omission of words, sentences, clauses, or paragraphs. However, it is a challenging task without recording or tracing the whole edited process. Under many temporal uncertainties, we explore the potentiality of our approach to proximate the accurate comparison to make sure which element blocks have a relation of edition with others. In the beginning, we apply a document layout analysis that combines traditional and modern technics to segment layouts in blocks of various types appropriately. Then we transform this issue into a problem of layout graph matching with textual awareness. Regarding graph matching, it is a long-studied problem with a broad range of applications. However, different from previous works focusing on visual images or structural layout, we also bring textual features into our model for adapting this domain. Specifically, based on the electronic document, we introduce an encoder to deal with the visual presentation decoding from PDF. Additionally, because the modifications can cause the inconsistency of document layout analysis between modified documents and the blocks can be merged and split, Sinkhorn divergence is adopted in our neural graph approach, which tries to overcome both these issues with many-to-many block matching. We demonstrate this on two categories of layouts, as follows., legal agreement and scientific articles, collected from our real-case datasets. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=document%20comparison" title="document comparison">document comparison</a>, <a href="https://publications.waset.org/abstracts/search?q=graph%20matching" title=" graph matching"> graph matching</a>, <a href="https://publications.waset.org/abstracts/search?q=graph%20neural%20network" title=" graph neural network"> graph neural network</a>, <a href="https://publications.waset.org/abstracts/search?q=modification%20similarity" title=" modification similarity"> modification similarity</a>, <a href="https://publications.waset.org/abstracts/search?q=multi-modal" title=" multi-modal"> multi-modal</a> </p> <a href="https://publications.waset.org/abstracts/141898/neural-graph-matching-for-modification-similarity-applied-to-electronic-document-comparison" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/141898.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">183</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">5683</span> Stock Market Prediction Using Convolutional Neural Network That Learns from a Graph</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Mo-Se%20Lee">Mo-Se Lee</a>, <a href="https://publications.waset.org/abstracts/search?q=Cheol-Hwi%20Ahn"> Cheol-Hwi Ahn</a>, <a href="https://publications.waset.org/abstracts/search?q=Kee-Young%20Kwahk"> Kee-Young Kwahk</a>, <a href="https://publications.waset.org/abstracts/search?q=Hyunchul%20Ahn"> Hyunchul Ahn</a> </p> <p class="card-text"><strong>Abstract:</strong></p> Over the past decade, deep learning has been in spotlight among various machine learning algorithms. In particular, CNN (Convolutional Neural Network), which is known as effective solution for recognizing and classifying images, has been popularly applied to classification and prediction problems in various fields. In this study, we try to apply CNN to stock market prediction, one of the most challenging tasks in the machine learning research. In specific, we propose to apply CNN as the binary classifier that predicts stock market direction (up or down) by using a graph as its input. That is, our proposal is to build a machine learning algorithm that mimics a person who looks at the graph and predicts whether the trend will go up or down. Our proposed model consists of four steps. In the first step, it divides the dataset into 5 days, 10 days, 15 days, and 20 days. And then, it creates graphs for each interval in step 2. In the next step, CNN classifiers are trained using the graphs generated in the previous step. In step 4, it optimizes the hyper parameters of the trained model by using the validation dataset. To validate our model, we will apply it to the prediction of KOSPI200 for 1,986 days in eight years (from 2009 to 2016). The experimental dataset will include 14 technical indicators such as CCI, Momentum, ROC and daily closing price of KOSPI200 of Korean stock market. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=convolutional%20neural%20network" title="convolutional neural network">convolutional neural network</a>, <a href="https://publications.waset.org/abstracts/search?q=deep%20learning" title=" deep learning"> deep learning</a>, <a href="https://publications.waset.org/abstracts/search?q=Korean%20stock%20market" title=" Korean stock market"> Korean stock market</a>, <a href="https://publications.waset.org/abstracts/search?q=stock%20market%20prediction" title=" stock market prediction"> stock market prediction</a> </p> <a href="https://publications.waset.org/abstracts/80318/stock-market-prediction-using-convolutional-neural-network-that-learns-from-a-graph" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/80318.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">429</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">5682</span> Aspect-Level Sentiment Analysis with Multi-Channel and Graph Convolutional Networks</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Jiajun%20Wang">Jiajun Wang</a>, <a href="https://publications.waset.org/abstracts/search?q=Xiaoge%20Li"> Xiaoge Li</a> </p> <p class="card-text"><strong>Abstract:</strong></p> The purpose of the aspect-level sentiment analysis task is to identify the sentiment polarity of aspects in a sentence. Currently, most methods mainly focus on using neural networks and attention mechanisms to model the relationship between aspects and context, but they ignore the dependence of words in different ranges in the sentence, resulting in deviation when assigning relationship weight to other words other than aspect words. To solve these problems, we propose a new aspect-level sentiment analysis model that combines a multi-channel convolutional network and graph convolutional network (GCN). Firstly, the context and the degree of association between words are characterized by Long Short-Term Memory (LSTM) and self-attention mechanism. Besides, a multi-channel convolutional network is used to extract the features of words in different ranges. Finally, a convolutional graph network is used to associate the node information of the dependency tree structure. We conduct experiments on four benchmark datasets. The experimental results are compared with those of other models, which shows that our model is better and more effective. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=aspect-level%20sentiment%20analysis" title="aspect-level sentiment analysis">aspect-level sentiment analysis</a>, <a href="https://publications.waset.org/abstracts/search?q=attention" title=" attention"> attention</a>, <a href="https://publications.waset.org/abstracts/search?q=multi-channel%20convolution%20network" title=" multi-channel convolution network"> multi-channel convolution network</a>, <a href="https://publications.waset.org/abstracts/search?q=graph%20convolution%20network" title=" graph convolution network"> graph convolution network</a>, <a href="https://publications.waset.org/abstracts/search?q=dependency%20tree" title=" dependency tree"> dependency tree</a> </p> <a href="https://publications.waset.org/abstracts/146513/aspect-level-sentiment-analysis-with-multi-channel-and-graph-convolutional-networks" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/146513.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">230</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">5681</span> MhAGCN: Multi-Head Attention Graph Convolutional Network for Web Services Classification</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Bing%20Li">Bing Li</a>, <a href="https://publications.waset.org/abstracts/search?q=Zhi%20Li"> Zhi Li</a>, <a href="https://publications.waset.org/abstracts/search?q=Yilong%20Yang"> Yilong Yang</a> </p> <p class="card-text"><strong>Abstract:</strong></p> Web classification can promote the quality of service discovery and management in the service repository. It is widely used to locate developers desired services. Although traditional classification methods based on supervised learning models can achieve classification tasks, developers need to manually mark web services, and the quality of these tags may not be enough to establish an accurate classifier for service classification. With the doubling of the number of web services, the manual tagging method has become unrealistic. In recent years, the attention mechanism has made remarkable progress in the field of deep learning, and its huge potential has been fully demonstrated in various fields. This paper designs a multi-head attention graph convolutional network (MHAGCN) service classification method, which can assign different weights to the neighborhood nodes without complicated matrix operations or relying on understanding the entire graph structure. The framework combines the advantages of the attention mechanism and graph convolutional neural network. It can classify web services through automatic feature extraction. The comprehensive experimental results on a real dataset not only show the superior performance of the proposed model over the existing models but also demonstrate its potentially good interpretability for graph analysis. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=attention%20mechanism" title="attention mechanism">attention mechanism</a>, <a href="https://publications.waset.org/abstracts/search?q=graph%20convolutional%20network" title=" graph convolutional network"> graph convolutional network</a>, <a href="https://publications.waset.org/abstracts/search?q=interpretability" title=" interpretability"> interpretability</a>, <a href="https://publications.waset.org/abstracts/search?q=service%20classification" title=" service classification"> service classification</a>, <a href="https://publications.waset.org/abstracts/search?q=service%20discovery" title=" service discovery"> service discovery</a> </p> <a href="https://publications.waset.org/abstracts/131673/mhagcn-multi-head-attention-graph-convolutional-network-for-web-services-classification" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/131673.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">141</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">5680</span> An Application of Graph Theory to The Electrical Circuit Using Matrix Method</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Samai%27la%20Abdullahi">Samai&#039;la Abdullahi</a> </p> <p class="card-text"><strong>Abstract:</strong></p> A graph is a pair of two set and so that a graph is a pictorial representation of a system using two basic element nodes and edges. A node is represented by a circle (either hallo shade) and edge is represented by a line segment connecting two nodes together. In this paper, we present a circuit network in the concept of graph theory application and also circuit models of graph are represented in logical connection method were we formulate matrix method of adjacency and incidence of matrix and application of truth table. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=euler%20circuit%20and%20path" title="euler circuit and path">euler circuit and path</a>, <a href="https://publications.waset.org/abstracts/search?q=graph%20representation%20of%20circuit%20networks" title=" graph representation of circuit networks"> graph representation of circuit networks</a>, <a href="https://publications.waset.org/abstracts/search?q=representation%20of%20graph%20models" title=" representation of graph models"> representation of graph models</a>, <a href="https://publications.waset.org/abstracts/search?q=representation%20of%20circuit%20network%20using%20logical%20truth%20table" title=" representation of circuit network using logical truth table"> representation of circuit network using logical truth table</a> </p> <a href="https://publications.waset.org/abstracts/32358/an-application-of-graph-theory-to-the-electrical-circuit-using-matrix-method" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/32358.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">568</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">5679</span> Solving the Quadratic Programming Problem Using a Recurrent Neural Network</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=A.%20A.%20Behroozpoor">A. A. Behroozpoor</a>, <a href="https://publications.waset.org/abstracts/search?q=M.%20M.%20Mazarei"> M. M. Mazarei </a> </p> <p class="card-text"><strong>Abstract:</strong></p> In this paper, a fuzzy recurrent neural network is proposed for solving the classical quadratic control problem subject to linear equality and bound constraints. The convergence of the state variables of the proposed neural network to achieve solution optimality is guaranteed. <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=REFERENCES%20%20%0D%0A%5B1%5D%09Xia" title="REFERENCES [1] Xia">REFERENCES [1] Xia</a>, <a href="https://publications.waset.org/abstracts/search?q=Y" title=" Y"> Y</a>, <a href="https://publications.waset.org/abstracts/search?q=A%20new%20neural%20network%20for%20solving%20linear%20and%20quadratic%20programming%20problems.%20IEEE%20Transactions%20on%20Neural%20Networks" title=" A new neural network for solving linear and quadratic programming problems. IEEE Transactions on Neural Networks"> A new neural network for solving linear and quadratic programming problems. IEEE Transactions on Neural Networks</a>, <a href="https://publications.waset.org/abstracts/search?q=7%286%29" title=" 7(6)"> 7(6)</a>, <a href="https://publications.waset.org/abstracts/search?q=1996" title=" 1996"> 1996</a>, <a href="https://publications.waset.org/abstracts/search?q=pp.1544%E2%80%931548.%0D%0A%5B2%5D%09Xia" title=" pp.1544–1548. [2] Xia"> pp.1544–1548. [2] Xia</a>, <a href="https://publications.waset.org/abstracts/search?q=Y." 
title=" Y."> Y.</a>, <a href="https://publications.waset.org/abstracts/search?q=%26%20Wang" title=" &amp; Wang"> &amp; Wang</a>, <a href="https://publications.waset.org/abstracts/search?q=J" title=" J"> J</a>, <a href="https://publications.waset.org/abstracts/search?q=A%20recurrent%20neural%20network%20for%20solving%20nonlinear%20convex%20programs%20subject%20to%20linear%20constraints.%20IEEE%20Transactions%20on%20Neural%20Networks" title=" A recurrent neural network for solving nonlinear convex programs subject to linear constraints. IEEE Transactions on Neural Networks"> A recurrent neural network for solving nonlinear convex programs subject to linear constraints. IEEE Transactions on Neural Networks</a>, <a href="https://publications.waset.org/abstracts/search?q=16%282%29" title="16(2)">16(2)</a>, <a href="https://publications.waset.org/abstracts/search?q=2005" title=" 2005"> 2005</a>, <a href="https://publications.waset.org/abstracts/search?q=pp.%20379%E2%80%93386.%0D%0A%5B3%5D%09Xia" title=" pp. 379–386. [3] Xia"> pp. 379–386. [3] Xia</a>, <a href="https://publications.waset.org/abstracts/search?q=Y." title=" Y."> Y.</a>, <a href="https://publications.waset.org/abstracts/search?q=H" title=" H"> H</a>, <a href="https://publications.waset.org/abstracts/search?q=Leung" title=" Leung"> Leung</a>, <a href="https://publications.waset.org/abstracts/search?q=%26%20J" title=" &amp; J"> &amp; J</a>, <a href="https://publications.waset.org/abstracts/search?q=Wang" title=" Wang"> Wang</a>, <a href="https://publications.waset.org/abstracts/search?q=A%20projection%20neural%20network%20and%20its%20application%20to%20constrained%20optimization%20problems.%20IEEE%20Transactions%20Circuits%20and%20Systems-I" title=" A projection neural network and its application to constrained optimization problems. IEEE Transactions Circuits and Systems-I"> A projection neural network and its application to constrained optimization problems. IEEE Transactions Circuits and Systems-I</a>, <a href="https://publications.waset.org/abstracts/search?q=49%284%29" title=" 49(4)"> 49(4)</a>, <a href="https://publications.waset.org/abstracts/search?q=2002" title=" 2002"> 2002</a>, <a href="https://publications.waset.org/abstracts/search?q=pp.447%E2%80%93458.B.%20%0D%0A%5B4%5D%09Q.%20Liu" title=" pp.447–458.B. [4] Q. Liu"> pp.447–458.B. [4] Q. Liu</a>, <a href="https://publications.waset.org/abstracts/search?q=Z.%20Guo" title=" Z. Guo"> Z. Guo</a>, <a href="https://publications.waset.org/abstracts/search?q=J.%20Wang" title=" J. Wang"> J. Wang</a>, <a href="https://publications.waset.org/abstracts/search?q=A%20one-layer%20recurrent%20neural%20network%20for%20constrained%20seudoconvex%20optimization%20and%20its%20application%20for%20dynamic%20portfolio%20optimization.%20Neural%20Networks" title=" A one-layer recurrent neural network for constrained seudoconvex optimization and its application for dynamic portfolio optimization. Neural Networks"> A one-layer recurrent neural network for constrained seudoconvex optimization and its application for dynamic portfolio optimization. Neural Networks</a>, <a href="https://publications.waset.org/abstracts/search?q=26" title=" 26"> 26</a>, <a href="https://publications.waset.org/abstracts/search?q=2012" title=" 2012"> 2012</a>, <a href="https://publications.waset.org/abstracts/search?q=pp.%2099-109." title=" pp. 99-109. "> pp. 99-109. 
Procedia: https://publications.waset.org/abstracts/19435/solving-the-quadratic-programming-problem-using-a-recurrent-neural-network | PDF: https://publications.waset.org/abstracts/19435.pdf | Downloads: 651

5678. A Summary-Based Text Classification Model for Graph Attention Networks
Authors: Shuo Liu
Abstract: In Chinese text classification tasks, redundant words and phrases can interfere with the extraction and analysis of text information and reduce the accuracy of the classification model. To remove irrelevant elements, use text content more efficiently, and improve classification accuracy, the texts in the corpus are first summarized with the TextRank algorithm, the words in each summary are used as nodes to construct a text graph, and a graph attention network (GAT) then classifies the text. In tests on a Chinese dataset collected from the web, classification accuracy improved over building the graph structure directly from the full text.
Keywords: Chinese natural language processing, text classification, abstract extraction, graph attention network
Procedia: https://publications.waset.org/abstracts/158060/a-summary-based-text-classification-model-for-graph-attention-networks | PDF: https://publications.waset.org/abstracts/158060.pdf | Downloads: 110

5677. Selecting the Best RBF Neural Network Using PSO Algorithm for ECG Signal Prediction
Authors: Najmeh Mohsenifar, Narjes Mohsenifar, Abbas Kargar
Abstract: This paper presents a stable method for predicting ECG signals with RBF neural networks tuned by the PSO algorithm.
Although the ECG signal of a healthy person is quasi-periodic, electrocardiographic data from a patient contain distortions, so there is no precise mathematical model for prediction. We therefore exploit neural networks, which are capable of complicated nonlinear mapping. Although the architecture and spread of an RBF network are usually selected by trial and error, here the PSO algorithm chooses the best network. In this way, 2 seconds of a recorded ECG signal are used to predict 20 seconds in advance. Our simulations show that the PSO algorithm can find the RBF neural network with minimum MSE, and the accuracy of the predicted ECG signal is 97%.
Keywords: electrocardiogram, RBF artificial neural network, PSO algorithm, predict, accuracy
Procedia: https://publications.waset.org/abstracts/33466/selecting-the-best-rbf-neural-network-using-pso-algorithm-for-ecg-signal-prediction | PDF: https://publications.waset.org/abstracts/33466.pdf | Downloads: 633

5676. Assessing Artificial Neural Network Models on Forecasting the Return of Stock Market Index
Authors: Hamid Rostami Jaz, Kamran Ameri Siahooei
Abstract: Various methods have been used to forecast index returns and index levels, and artificial intelligence and artificial neural networks are among them. This study compares the performance of a radial basis function (RBF) neural network and a feed-forward perceptron neural network in forecasting investment returns on a stock index. To this end, the return on the Tehran Stock Exchange index is evaluated and the two networks are compared. Network performance is tested by least-squares error in both in-sample and out-of-sample settings. The results show the superiority of the RBF network in the in-sample approach and of the perceptron network in the out-of-sample approach.
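To make the in-sample versus out-of-sample comparison concrete, here is a minimal NumPy sketch of a radial basis function network fitted by least squares on a synthetic return series. The data, the number of centers, and the spread heuristic are invented for illustration and are not taken from the study.

import numpy as np

rng = np.random.default_rng(0)

# Synthetic "index return" series with a mild AR(1) structure.
eps = rng.normal(0.0, 0.01, 500)
r = np.zeros(500)
for k in range(1, 500):
    r[k] = 0.5 * r[k - 1] + eps[k]

# Lagged returns as features, next return as target.
lags = 5
X = np.column_stack([r[i:len(r) - lags + i] for i in range(lags)])
y = r[lags:]

# Chronological split: fit in-sample, evaluate out-of-sample.
split = int(0.7 * len(y))
X_tr, y_tr, X_te, y_te = X[:split], y[:split], X[split:], y[split:]

def rbf_design(X, centers, spread):
    # Gaussian basis functions centred on selected training points.
    d2 = ((X[:, None, :] - centers[None, :, :]) ** 2).sum(-1)
    return np.exp(-d2 / (2.0 * spread ** 2))

centers = X_tr[rng.choice(len(X_tr), 20, replace=False)]
spread = np.median(np.linalg.norm(centers[:, None, :] - centers[None, :, :], axis=-1)) + 1e-12
Phi_tr = rbf_design(X_tr, centers, spread)
w, *_ = np.linalg.lstsq(Phi_tr, y_tr, rcond=None)   # output weights by least squares

mse_in = np.mean((Phi_tr @ w - y_tr) ** 2)
mse_out = np.mean((rbf_design(X_te, centers, spread) @ w - y_te) ** 2)
print(f"in-sample MSE: {mse_in:.2e}, out-of-sample MSE: {mse_out:.2e}")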
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=exchange%20index" title="exchange index">exchange index</a>, <a href="https://publications.waset.org/abstracts/search?q=forecasting" title=" forecasting"> forecasting</a>, <a href="https://publications.waset.org/abstracts/search?q=perceptron%20neural%20network" title=" perceptron neural network"> perceptron neural network</a>, <a href="https://publications.waset.org/abstracts/search?q=Tehran%20stock%20exchange" title=" Tehran stock exchange"> Tehran stock exchange</a> </p> <a href="https://publications.waset.org/abstracts/51503/assessing-artificial-neural-network-models-on-forecasting-the-return-of-stock-market-index" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/51503.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">470</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">5675</span> The Application of a Hybrid Neural Network for Recognition of a Handwritten Kazakh Text</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Almagul%20%20Assainova">Almagul Assainova </a>, <a href="https://publications.waset.org/abstracts/search?q=Dariya%20Abykenova"> Dariya Abykenova</a>, <a href="https://publications.waset.org/abstracts/search?q=Liudmila%20Goncharenko"> Liudmila Goncharenko</a>, <a href="https://publications.waset.org/abstracts/search?q=Sergey%20%20Sybachin"> Sergey Sybachin</a>, <a href="https://publications.waset.org/abstracts/search?q=Saule%20Rakhimova"> Saule Rakhimova</a>, <a href="https://publications.waset.org/abstracts/search?q=Abay%20Aman"> Abay Aman</a> </p> <p class="card-text"><strong>Abstract:</strong></p> The recognition of a handwritten Kazakh text is a relevant objective today for the digitization of materials. The study presents a model of a hybrid neural network for handwriting recognition, which includes a convolutional neural network and a multi-layer perceptron. Each network includes 1024 input neurons and 42 output neurons. The model is implemented in the program, written in the Python programming language using the EMNIST database, NumPy, Keras, and Tensorflow modules. The neural network training of such specific letters of the Kazakh alphabet as ә, ғ, қ, ң, ө, ұ, ү, h, і was conducted. The neural network model and the program created on its basis can be used in electronic document management systems to digitize the Kazakh text. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=handwriting%20recognition%20system" title="handwriting recognition system">handwriting recognition system</a>, <a href="https://publications.waset.org/abstracts/search?q=image%20recognition" title=" image recognition"> image recognition</a>, <a href="https://publications.waset.org/abstracts/search?q=Kazakh%20font" title=" Kazakh font"> Kazakh font</a>, <a href="https://publications.waset.org/abstracts/search?q=machine%20learning" title=" machine learning"> machine learning</a>, <a href="https://publications.waset.org/abstracts/search?q=neural%20networks" title=" neural networks"> neural networks</a> </p> <a href="https://publications.waset.org/abstracts/129773/the-application-of-a-hybrid-neural-network-for-recognition-of-a-handwritten-kazakh-text" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/129773.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">268</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">5674</span> Artificial Neural Network Speed Controller for Excited DC Motor</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Elabed%20Saud">Elabed Saud</a> </p> <p class="card-text"><strong>Abstract:</strong></p> This paper introduces the new ability of Artificial Neural Networks (ANNs) in estimating speed and controlling the separately excited DC motor. The neural control scheme consists of two parts. One is the neural estimator which is used to estimate the motor speed. The other is the neural controller which is used to generate a control signal for a converter. These two neutrals are training by Levenberg-Marquardt back-propagation algorithm. ANNs are the standard three layers feed-forward neural network with sigmoid activation functions in the input and hidden layers and purelin in the output layer. Simulation results are presented to demonstrate the effectiveness of this neural and advantage of the control system DC motor with ANNs in comparison with the conventional scheme without ANNs. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=Artificial%20Neural%20Network%20%28ANNs%29" title="Artificial Neural Network (ANNs)">Artificial Neural Network (ANNs)</a>, <a href="https://publications.waset.org/abstracts/search?q=excited%20DC%20motor" title=" excited DC motor"> excited DC motor</a>, <a href="https://publications.waset.org/abstracts/search?q=convenional%20controller" title=" convenional controller"> convenional controller</a>, <a href="https://publications.waset.org/abstracts/search?q=speed%20Controller" title=" speed Controller"> speed Controller</a> </p> <a href="https://publications.waset.org/abstracts/21941/artificial-neural-network-speed-controller-for-excited-dc-motor" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/21941.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">732</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">5673</span> Design of Neural Predictor for Vibration Analysis of Drilling Machine</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=%C4%B0kbal%20Eski">İkbal Eski </a> </p> <p class="card-text"><strong>Abstract:</strong></p> This investigation is researched on design of robust neural network predictors for analyzing vibration effects on moving parts of a drilling machine. Moreover, the research is divided two parts; first part is experimental investigation, second part is simulation analysis with neural networks. Therefore, a real time the drilling machine is used to vibrations during working conditions. The measured real vibration parameters are analyzed with proposed neural network. As results: Simulation approaches show that Radial Basis Neural Network has good performance to adapt real time parameters of the drilling machine. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=artificial%20neural%20network" title="artificial neural network">artificial neural network</a>, <a href="https://publications.waset.org/abstracts/search?q=vibration%20analyses" title=" vibration analyses"> vibration analyses</a>, <a href="https://publications.waset.org/abstracts/search?q=drilling%20machine" title=" drilling machine"> drilling machine</a>, <a href="https://publications.waset.org/abstracts/search?q=robust" title=" robust"> robust</a> </p> <a href="https://publications.waset.org/abstracts/30313/design-of-neural-predictor-for-vibration-analysis-of-drilling-machine" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/30313.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">400</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">5672</span> Trusted Neural Network: Reversibility in Neural Networks for Network Integrity Verification</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Malgorzata%20Schwab">Malgorzata Schwab</a>, <a href="https://publications.waset.org/abstracts/search?q=Ashis%20Kumer%20Biswas"> Ashis Kumer Biswas</a> </p> <p class="card-text"><strong>Abstract:</strong></p> In this concept paper, we explore the topic of Reversibility in Neural Networks leveraged for Network Integrity Verification and crafted the term ''Trusted Neural Network'' (TNN), paired with the API abstraction around it, to embrace the idea formally. This newly proposed high-level generalizable TNN model builds upon the Invertible Neural Network architecture, trained simultaneously in both forward and reverse directions. This allows for the original system inputs to be compared with the ones reconstructed from the outputs in the reversed flow to assess the integrity of the end-to-end inference flow. The outcome of that assessment is captured as an Integrity Score. Concrete implementation reflecting the needs of specific problem domains can be derived from this general approach and is demonstrated in the experiments. The model aspires to become a useful practice in drafting high-level systems architectures which incorporate AI capabilities. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=trusted" title="trusted">trusted</a>, <a href="https://publications.waset.org/abstracts/search?q=neural" title=" neural"> neural</a>, <a href="https://publications.waset.org/abstracts/search?q=invertible" title=" invertible"> invertible</a>, <a href="https://publications.waset.org/abstracts/search?q=API" title=" API"> API</a> </p> <a href="https://publications.waset.org/abstracts/144758/trusted-neural-network-reversibility-in-neural-networks-for-network-integrity-verification" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/144758.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">152</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">5671</span> A Framework for Chinese Domain-Specific Distant Supervised Named Entity Recognition</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Qin%20Long">Qin Long</a>, <a href="https://publications.waset.org/abstracts/search?q=Li%20Xiaoge"> Li Xiaoge</a> </p> <p class="card-text"><strong>Abstract:</strong></p> The Knowledge Graphs have now become a new form of knowledge representation. However, there is no consensus in regard to a plausible and definition of entities and relationships in the domain-specific knowledge graph. Further, in conjunction with several limitations and deficiencies, various domain-specific entities and relationships recognition approaches are far from perfect. Specifically, named entity recognition in Chinese domain is a critical task for the natural language process applications. However, a bottleneck problem with Chinese named entity recognition in new domains is the lack of annotated data. To address this challenge, a domain distant supervised named entity recognition framework is proposed. The framework is divided into two stages: first, the distant supervised corpus is generated based on the entity linking model of graph attention neural network; secondly, the generated corpus is trained as the input of the distant supervised named entity recognition model to train to obtain named entities. The link model is verified in the ccks2019 entity link corpus, and the F1 value is 2% higher than that of the benchmark method. The re-pre-trained BERT language model is added to the benchmark method, and the results show that it is more suitable for distant supervised named entity recognition tasks. Finally, it is applied in the computer field, and the results show that this framework can obtain domain named entities. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=distant%20named%20entity%20recognition" title="distant named entity recognition">distant named entity recognition</a>, <a href="https://publications.waset.org/abstracts/search?q=entity%20linking" title=" entity linking"> entity linking</a>, <a href="https://publications.waset.org/abstracts/search?q=knowledge%20graph" title=" knowledge graph"> knowledge graph</a>, <a href="https://publications.waset.org/abstracts/search?q=graph%20attention%20neural%20network" title=" graph attention neural network"> graph attention neural network</a> </p> <a href="https://publications.waset.org/abstracts/145772/a-framework-for-chinese-domain-specific-distant-supervised-named-entity-recognition" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/145772.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">101</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">5670</span> Prediction of Oil Recovery Factor Using Artificial Neural Network</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=O.%20P.%20Oladipo">O. P. Oladipo</a>, <a href="https://publications.waset.org/abstracts/search?q=O.%20A.%20Falode"> O. A. Falode</a> </p> <p class="card-text"><strong>Abstract:</strong></p> The determination of Recovery Factor is of great importance to the reservoir engineer since it relates reserves to the initial oil in place. Reserves are the producible portion of reservoirs and give an indication of the profitability of a field Development. The core objective of this project is to develop an artificial neural network model using selected reservoir data to predict Recovery Factors (RF) of hydrocarbon reservoirs and compare the model with a couple of the existing correlations. The type of Artificial Neural Network model developed was the Single Layer Feed Forward Network. MATLAB was used as the network simulator and the network was trained using the supervised learning method, Afterwards, the network was tested with input data never seen by the network. The results of the predicted values of the recovery factors of the Artificial Neural Network Model, API Correlation for water drive reservoirs (Sands and Sandstones) and Guthrie and Greenberger Correlation Equation were obtained and compared. It was noted that the coefficient of correlation of the Artificial Neural Network Model was higher than the coefficient of correlations of the other two correlation equations, thus making it a more accurate prediction tool. The Artificial Neural Network, because of its accurate prediction ability is helpful in the correct prediction of hydrocarbon reservoir factors. Artificial Neural Network could be applied in the prediction of other Petroleum Engineering parameters because it is able to recognise complex patterns of data set and establish a relationship between them. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=recovery%20factor" title="recovery factor">recovery factor</a>, <a href="https://publications.waset.org/abstracts/search?q=reservoir" title=" reservoir"> reservoir</a>, <a href="https://publications.waset.org/abstracts/search?q=reserves" title=" reserves"> reserves</a>, <a href="https://publications.waset.org/abstracts/search?q=artificial%20neural%20network" title=" artificial neural network"> artificial neural network</a>, <a href="https://publications.waset.org/abstracts/search?q=hydrocarbon" title=" hydrocarbon"> hydrocarbon</a>, <a href="https://publications.waset.org/abstracts/search?q=MATLAB" title=" MATLAB"> MATLAB</a>, <a href="https://publications.waset.org/abstracts/search?q=API" title=" API"> API</a>, <a href="https://publications.waset.org/abstracts/search?q=Guthrie" title=" Guthrie"> Guthrie</a>, <a href="https://publications.waset.org/abstracts/search?q=Greenberger" title=" Greenberger"> Greenberger</a> </p> <a href="https://publications.waset.org/abstracts/18896/prediction-of-oil-recovery-factor-using-artificial-neural-network" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/18896.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">449</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">5669</span> A Two-Step Framework for Unsupervised Speaker Segmentation Using BIC and Artificial Neural Network</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Ahmad%20Alwosheel">Ahmad Alwosheel</a>, <a href="https://publications.waset.org/abstracts/search?q=Ahmed%20Alqaraawi"> Ahmed Alqaraawi</a> </p> <p class="card-text"><strong>Abstract:</strong></p> This work proposes a new speaker segmentation approach for two speakers. It is an online approach that does not require a prior information about speaker models. It has two phases, a conventional approach such as unsupervised BIC-based is utilized in the first phase to detect speaker changes and train a Neural Network, while in the second phase, the output trained parameters from the Neural Network are used to predict next incoming audio stream. Using this approach, a comparable accuracy to similar BIC-based approaches is achieved with a significant improvement in terms of computation time. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=artificial%20neural%20network" title="artificial neural network">artificial neural network</a>, <a href="https://publications.waset.org/abstracts/search?q=diarization" title=" diarization"> diarization</a>, <a href="https://publications.waset.org/abstracts/search?q=speaker%20indexing" title=" speaker indexing"> speaker indexing</a>, <a href="https://publications.waset.org/abstracts/search?q=speaker%20segmentation" title=" speaker segmentation"> speaker segmentation</a> </p> <a href="https://publications.waset.org/abstracts/27191/a-two-step-framework-for-unsupervised-speaker-segmentation-using-bic-and-artificial-neural-network" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/27191.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">511</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">5668</span> Optimizing the Probabilistic Neural Network Training Algorithm for Multi-Class Identification</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Abdelhadi%20Lotfi">Abdelhadi Lotfi</a>, <a href="https://publications.waset.org/abstracts/search?q=Abdelkader%20Benyettou"> Abdelkader Benyettou</a> </p> <p class="card-text"><strong>Abstract:</strong></p> In this work, a training algorithm for probabilistic neural networks (PNN) is presented. The algorithm addresses one of the major drawbacks of PNN, which is the size of the hidden layer in the network. By using a cross-validation training algorithm, the number of hidden neurons is shrunk to a smaller number consisting of the most representative samples of the training set. This is done without affecting the overall architecture of the network. Performance of the network is compared against performance of standard PNN for different databases from the UCI database repository. Results show an important gain in network size and performance. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=classification" title="classification">classification</a>, <a href="https://publications.waset.org/abstracts/search?q=probabilistic%20neural%20networks" title=" probabilistic neural networks"> probabilistic neural networks</a>, <a href="https://publications.waset.org/abstracts/search?q=network%20optimization" title=" network optimization"> network optimization</a>, <a href="https://publications.waset.org/abstracts/search?q=pattern%20recognition" title=" pattern recognition"> pattern recognition</a> </p> <a href="https://publications.waset.org/abstracts/104139/optimizing-the-probabilistic-neural-network-training-algorithm-for-multi-class-identification" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/104139.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">271</span> </span> </div> </div> <ul class="pagination"> <li class="page-item disabled"><span class="page-link">&lsaquo;</span></li> <li class="page-item active"><span class="page-link">1</span></li> <li class="page-item"><a class="page-link" href="https://publications.waset.org/abstracts/search?q=Graph%20Neural%20%20Network%20%28GNN%29&amp;page=2">2</a></li> <li class="page-item"><a class="page-link" href="https://publications.waset.org/abstracts/search?q=Graph%20Neural%20%20Network%20%28GNN%29&amp;page=3">3</a></li> <li class="page-item"><a class="page-link" href="https://publications.waset.org/abstracts/search?q=Graph%20Neural%20%20Network%20%28GNN%29&amp;page=4">4</a></li> <li class="page-item"><a class="page-link" href="https://publications.waset.org/abstracts/search?q=Graph%20Neural%20%20Network%20%28GNN%29&amp;page=5">5</a></li> <li class="page-item"><a class="page-link" href="https://publications.waset.org/abstracts/search?q=Graph%20Neural%20%20Network%20%28GNN%29&amp;page=6">6</a></li> <li class="page-item"><a class="page-link" href="https://publications.waset.org/abstracts/search?q=Graph%20Neural%20%20Network%20%28GNN%29&amp;page=7">7</a></li> <li class="page-item"><a class="page-link" href="https://publications.waset.org/abstracts/search?q=Graph%20Neural%20%20Network%20%28GNN%29&amp;page=8">8</a></li> <li class="page-item"><a class="page-link" href="https://publications.waset.org/abstracts/search?q=Graph%20Neural%20%20Network%20%28GNN%29&amp;page=9">9</a></li> <li class="page-item"><a class="page-link" href="https://publications.waset.org/abstracts/search?q=Graph%20Neural%20%20Network%20%28GNN%29&amp;page=10">10</a></li> <li class="page-item disabled"><span class="page-link">...</span></li> <li class="page-item"><a class="page-link" href="https://publications.waset.org/abstracts/search?q=Graph%20Neural%20%20Network%20%28GNN%29&amp;page=189">189</a></li> <li class="page-item"><a class="page-link" href="https://publications.waset.org/abstracts/search?q=Graph%20Neural%20%20Network%20%28GNN%29&amp;page=190">190</a></li> <li class="page-item"><a class="page-link" href="https://publications.waset.org/abstracts/search?q=Graph%20Neural%20%20Network%20%28GNN%29&amp;page=2" rel="next">&rsaquo;</a></li> </ul> </div> </main> <footer> <div id="infolinks" class="pt-3 pb-2"> <div class="container"> <div style="background-color:#f5f5f5;" class="p-3"> <div class="row"> <div class="col-md-2"> <ul class="list-unstyled"> About <li><a href="https://waset.org/page/support">About 
