<!-- CINXE.COM — Search results for: artificial vision -->
<!DOCTYPE html>
<html lang="en" dir="ltr">
<head>
  <!-- charset must appear within the first 1024 bytes of the document, so it comes first -->
  <meta charset="utf-8">
  <!-- FIX: removed maximum-scale=1 / user-scalable=no — blocking pinch-zoom fails WCAG 1.4.4 (Resize Text) -->
  <meta name="viewport" content="width=device-width, initial-scale=1">
  <title>Search results for: artificial vision</title>
  <meta name="description" content="Search results for: artificial vision">
  <meta name="keywords" content="artificial vision">
  <!-- Google tag (gtag.js) -->
  <script async src="https://www.googletagmanager.com/gtag/js?id=G-P63WKM1TM1"></script>
  <script>
    window.dataLayer = window.dataLayer || [];
    function gtag(){dataLayer.push(arguments);}
    gtag('js', new Date());
    gtag('config', 'G-P63WKM1TM1');
  </script>
  <!-- Yandex.Metrika counter (redundant type="text/javascript" removed; it is the default) -->
  <script>
    (function(m,e,t,r,i,k,a){m[i]=m[i]||function(){(m[i].a=m[i].a||[]).push(arguments)};
    m[i].l=1*new Date();
    for (var j = 0; j < document.scripts.length; j++) {if (document.scripts[j].src === r) { return; }}
    k=e.createElement(t),a=e.getElementsByTagName(t)[0],k.async=1,k.src=r,a.parentNode.insertBefore(k,a)})
    (window, document, "script", "https://mc.yandex.ru/metrika/tag.js", "ym");
    ym(55165297, "init", { clickmap:false, trackLinks:true, accurateTrackBounce:true, webvisor:false });
  </script>
  <noscript><div><img src="https://mc.yandex.ru/watch/55165297" style="position:absolute; left:-9999px;" alt=""></div></noscript>
  <!-- /Yandex.Metrika counter -->
  <!-- Matomo -->
  <!-- NOTE(review): tracker URL is protocol-relative ("//matomo.waset.org/"); consider an explicit https:// — left unchanged here because it is runtime JS string content -->
  <script>
    var _paq = window._paq = window._paq || [];
    /* tracker methods like "setCustomDimension" should be called before "trackPageView" */
    _paq.push(['trackPageView']);
    _paq.push(['enableLinkTracking']);
    (function() {
      var u="//matomo.waset.org/";
      _paq.push(['setTrackerUrl', u+'matomo.php']);
      _paq.push(['setSiteId', '2']);
      var d=document, g=d.createElement('script'), s=d.getElementsByTagName('script')[0];
      g.async=true; g.src=u+'matomo.js'; s.parentNode.insertBefore(g,s);
    })();
  </script>
  <!-- End Matomo Code -->
  <link href="https://cdn.waset.org/favicon.ico" type="image/x-icon" rel="shortcut icon">
<link href="https://cdn.waset.org/static/plugins/bootstrap-4.2.1/css/bootstrap.min.css" rel="stylesheet"> <link href="https://cdn.waset.org/static/plugins/fontawesome/css/all.min.css" rel="stylesheet"> <link href="https://cdn.waset.org/static/css/site.css?v=150220211555" rel="stylesheet"> </head> <body> <header> <div class="container"> <nav class="navbar navbar-expand-lg navbar-light"> <a class="navbar-brand" href="https://waset.org"> <img src="https://cdn.waset.org/static/images/wasetc.png" alt="Open Science Research Excellence" title="Open Science Research Excellence" /> </a> <button class="d-block d-lg-none navbar-toggler ml-auto" type="button" data-toggle="collapse" data-target="#navbarMenu" aria-controls="navbarMenu" aria-expanded="false" aria-label="Toggle navigation"> <span class="navbar-toggler-icon"></span> </button> <div class="w-100"> <div class="d-none d-lg-flex flex-row-reverse"> <form method="get" action="https://waset.org/search" class="form-inline my-2 my-lg-0"> <input class="form-control mr-sm-2" type="search" placeholder="Search Conferences" value="artificial vision" name="q" aria-label="Search"> <button class="btn btn-light my-2 my-sm-0" type="submit"><i class="fas fa-search"></i></button> </form> </div> <div class="collapse navbar-collapse mt-1" id="navbarMenu"> <ul class="navbar-nav ml-auto align-items-center" id="mainNavMenu"> <li class="nav-item"> <a class="nav-link" href="https://waset.org/conferences" title="Conferences in 2025/2026/2027">Conferences</a> </li> <li class="nav-item"> <a class="nav-link" href="https://waset.org/disciplines" title="Disciplines">Disciplines</a> </li> <li class="nav-item"> <a class="nav-link" href="https://waset.org/committees" rel="nofollow">Committees</a> </li> <li class="nav-item dropdown"> <a class="nav-link dropdown-toggle" href="#" id="navbarDropdownPublications" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false"> Publications </a> <div class="dropdown-menu" 
aria-labelledby="navbarDropdownPublications"> <a class="dropdown-item" href="https://publications.waset.org/abstracts">Abstracts</a> <a class="dropdown-item" href="https://publications.waset.org">Periodicals</a> <a class="dropdown-item" href="https://publications.waset.org/archive">Archive</a> </div> </li> <li class="nav-item"> <a class="nav-link" href="https://waset.org/page/support" title="Support">Support</a> </li> </ul> </div> </div> </nav> </div> </header> <main> <div class="container mt-4"> <div class="row"> <div class="col-md-9 mx-auto"> <form method="get" action="https://publications.waset.org/abstracts/search"> <div id="custom-search-input"> <div class="input-group"> <i class="fas fa-search"></i> <input type="text" class="search-query" name="q" placeholder="Author, Title, Abstract, Keywords" value="artificial vision"> <input type="submit" class="btn_search" value="Search"> </div> </div> </form> </div> </div> <div class="row mt-3"> <div class="col-sm-3"> <div class="card"> <div class="card-body"><strong>Commenced</strong> in January 2007</div> </div> </div> <div class="col-sm-3"> <div class="card"> <div class="card-body"><strong>Frequency:</strong> Monthly</div> </div> </div> <div class="col-sm-3"> <div class="card"> <div class="card-body"><strong>Edition:</strong> International</div> </div> </div> <div class="col-sm-3"> <div class="card"> <div class="card-body"><strong>Paper Count:</strong> 3211</div> </div> </div> </div> <h1 class="mt-3 mb-3 text-center" style="font-size:1.6rem;">Search results for: artificial vision</h1> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">3211</span> Inspection of Railway Track Fastening Elements Using Artificial Vision</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Abdelkrim%20Belhaoua">Abdelkrim Belhaoua</a>, <a 
href="https://publications.waset.org/abstracts/search?q=Jean-Pierre%20Radoux"> Jean-Pierre Radoux</a> </p> <p class="card-text"><strong>Abstract:</strong></p> In France, the railway network is one of the main transport infrastructures and is the second largest European network. Therefore, railway inspection is an important task in railway maintenance to ensure safety for passengers using significant means in personal and technical facilities. Artificial vision has recently been applied to several railway applications due to its potential to improve the efficiency and accuracy when analyzing large databases of acquired images. In this paper, we present a vision system able to detect fastening elements based on artificial vision approach. This system acquires railway images using a CCD camera installed under a control carriage. These images are stitched together before having processed. Experimental results are presented to show that the proposed method is robust for detection fasteners in a complex environment. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=computer%20vision" title="computer vision">computer vision</a>, <a href="https://publications.waset.org/abstracts/search?q=image%20processing" title=" image processing"> image processing</a>, <a href="https://publications.waset.org/abstracts/search?q=railway%20inspection" title=" railway inspection"> railway inspection</a>, <a href="https://publications.waset.org/abstracts/search?q=image%20stitching" title=" image stitching"> image stitching</a>, <a href="https://publications.waset.org/abstracts/search?q=fastener%20recognition" title=" fastener recognition"> fastener recognition</a>, <a href="https://publications.waset.org/abstracts/search?q=neural%20network" title=" neural network"> neural network</a> </p> <a href="https://publications.waset.org/abstracts/38749/inspection-of-railway-track-fastening-elements-using-artificial-vision" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/38749.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">468</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">3210</span> Applying AI and IoT to Enhance Eye Vision Assessment, Early Detection of Eye Diseases, and Personalised Vision Correction</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Gasim%20Alandjani">Gasim Alandjani</a> </p> <p class="card-text"><strong>Abstract:</strong></p> This research paper investigates the use of artificial intelligence (AI) and the Internet of Things (IoT) to improve eye healthcare; it concentrates on eye vision assessment, early discovery of eye ailments, and individualised vision correction. 
The study offers a broad review of literature and methodology; it features vital findings and inferences for advancing patient results, boosting admittance to care, elevating resource apportionment, and directing future research and practice. The study concluded that the assimilation of AI and IoT advancements provides progressive answers to traditional hurdles in eye healthcare, guaranteeing more precise, comprehensive, and individualised interventions for patients globally. The study emphasizes the significance of sustained innovation and the application of AI and IoT-driven methodologies to improve eye healthcare and vision for forthcoming generations. <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=AI" title="AI">AI</a>, <a href="https://publications.waset.org/abstracts/search?q=IoT" title=" IoT"> IoT</a>, <a href="https://publications.waset.org/abstracts/search?q=eye%20vision%20assessment" title=" eye vision assessment"> eye vision assessment</a>, <a href="https://publications.waset.org/abstracts/search?q=computer%20engineering" title=" computer engineering"> computer engineering</a> </p> <a href="https://publications.waset.org/abstracts/196391/applying-ai-and-iot-to-enhance-eye-vision-assessment-early-detection-of-eye-diseases-and-personalised-vision-correction" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/196391.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">17</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">3209</span> Using Artificial Vision Techniques for Dust Detection on Photovoltaic Panels</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a 
href="https://publications.waset.org/abstracts/search?q=Gustavo%20Funes">Gustavo Funes</a>, <a href="https://publications.waset.org/abstracts/search?q=Eduardo%20Peters"> Eduardo Peters</a>, <a href="https://publications.waset.org/abstracts/search?q=Jose%20Delpiano"> Jose Delpiano</a> </p> <p class="card-text"><strong>Abstract:</strong></p> It is widely known that photovoltaic technology has been massively distributed over the last decade despite its low-efficiency ratio. Dust deposition reduces this efficiency even more, lowering the energy production and module lifespan. In this work, we developed an artificial vision algorithm based on CIELAB color space to identify dust over panels in an autonomous way. We performed several experiments photographing three different types of panels, 30W, 340W and 410W. Those panels were soiled artificially with uniform and non-uniform distributed dust. The algorithm proposed uses statistical tools to provide a simulation with a 100% soiled panel and then performs a comparison to get the percentage of dirt in the experimental data set. The simulation uses a seed that is obtained by taking a dust sample from the maximum amount of dust from the dataset. The final result is the dirt percentage and the possible distribution of dust over the panel. Dust deposition is a key factor for plant owners to determine cleaning cycles or identify nonuniform depositions that could lead to module failure and hot spots. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=dust%20detection" title="dust detection">dust detection</a>, <a href="https://publications.waset.org/abstracts/search?q=photovoltaic" title=" photovoltaic"> photovoltaic</a>, <a href="https://publications.waset.org/abstracts/search?q=artificial%20vision" title=" artificial vision"> artificial vision</a>, <a href="https://publications.waset.org/abstracts/search?q=soiling" title=" soiling"> soiling</a> </p> <a href="https://publications.waset.org/abstracts/182064/using-artificial-vision-techniques-for-dust-detection-on-photovoltaic-panels" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/182064.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">55</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">3208</span> A Combined Approach Based on Artificial Intelligence and Computer Vision for Qualitative Grading of Rice Grains</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Hemad%20Zareiforoush">Hemad Zareiforoush</a>, <a href="https://publications.waset.org/abstracts/search?q=Saeed%20Minaei"> Saeed Minaei</a>, <a href="https://publications.waset.org/abstracts/search?q=Ahmad%20Banakar"> Ahmad Banakar</a>, <a href="https://publications.waset.org/abstracts/search?q=Mohammad%20Reza%20Alizadeh"> Mohammad Reza Alizadeh</a> </p> <p class="card-text"><strong>Abstract:</strong></p> The quality inspection of rice (Oryza sativa L.) during its various processing stages is very important. 
In this research, an artificial intelligence-based model coupled with computer vision techniques was developed as a decision support system for qualitative grading of rice grains. For conducting the experiments, first, 25 samples of rice grains with different levels of percentage of broken kernels (PBK) and degree of milling (DOM) were prepared and their qualitative grade was assessed by experienced experts. Then, the quality parameters of the same samples examined by experts were determined using a machine vision system. A grading model was developed based on fuzzy logic theory in MATLAB software for making a relationship between the qualitative characteristics of the product and its quality. Totally, 25 rules were used for qualitative grading based on AND operator and Mamdani inference system. The fuzzy inference system was consisted of two input linguistic variables namely, DOM and PBK, which were obtained by the machine vision system, and one output variable (quality of the product). The model output was finally defuzzified using Center of Maximum (COM) method. In order to evaluate the developed model, the output of the fuzzy system was compared with experts’ assessments. It was revealed that the developed model can estimate the qualitative grade of the product with an accuracy of 95.74%. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=machine%20vision" title="machine vision">machine vision</a>, <a href="https://publications.waset.org/abstracts/search?q=fuzzy%20logic" title=" fuzzy logic"> fuzzy logic</a>, <a href="https://publications.waset.org/abstracts/search?q=rice" title=" rice"> rice</a>, <a href="https://publications.waset.org/abstracts/search?q=quality" title=" quality"> quality</a> </p> <a href="https://publications.waset.org/abstracts/9943/a-combined-approach-based-on-artificial-intelligence-and-computer-vision-for-qualitative-grading-of-rice-grains" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/9943.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">425</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">3207</span> Proposal for a Web System for the Control of Fungal Diseases in Grapes in Fruits Markets</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Carlos%20Tarme%C3%B1o%20Noriega">Carlos Tarmeño Noriega</a>, <a href="https://publications.waset.org/abstracts/search?q=Igor%20Aguilar%20Alonso"> Igor Aguilar Alonso</a> </p> <p class="card-text"><strong>Abstract:</strong></p> Fungal diseases are common in vineyards; they cause a decrease in the quality of the products that can be sold, generating distrust of the customer towards the seller when buying fruit. Currently, technology allows the classification of fruits according to their characteristics thanks to artificial intelligence. 
This study proposes the implementation of a control system that allows the identification of the main fungal diseases present in the Italia grape, making use of a convolutional neural network (CNN), OpenCV, and TensorFlow. The methodology used was based on a collection of 20 articles referring to the proposed research on quality control, classification, and recognition of fruits through artificial vision techniques. <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=computer%20vision" title="computer vision">computer vision</a>, <a href="https://publications.waset.org/abstracts/search?q=convolutional%20neural%20networks" title=" convolutional neural networks"> convolutional neural networks</a>, <a href="https://publications.waset.org/abstracts/search?q=quality%20control" title=" quality control"> quality control</a>, <a href="https://publications.waset.org/abstracts/search?q=fruit%20market" title=" fruit market"> fruit market</a>, <a href="https://publications.waset.org/abstracts/search?q=OpenCV" title=" OpenCV"> OpenCV</a>, <a href="https://publications.waset.org/abstracts/search?q=TensorFlow" title=" TensorFlow"> TensorFlow</a> </p> <a href="https://publications.waset.org/abstracts/160550/proposal-for-a-web-system-for-the-control-of-fungal-diseases-in-grapes-in-fruits-markets" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/160550.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">89</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">3206</span> Control of Belts for Classification of Geometric Figures by Artificial Vision</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a 
href="https://publications.waset.org/abstracts/search?q=Juan%20Sebastian%20Huertas%20Piedrahita">Juan Sebastian Huertas Piedrahita</a>, <a href="https://publications.waset.org/abstracts/search?q=Jaime%20Arturo%20Lopez%20Duque"> Jaime Arturo Lopez Duque</a>, <a href="https://publications.waset.org/abstracts/search?q=Eduardo%20Luis%20Perez%20Londo%C3%B1o"> Eduardo Luis Perez Londoño</a>, <a href="https://publications.waset.org/abstracts/search?q=Juli%C3%A1n%20S.%20Rodr%C3%ADguez"> Julián S. Rodríguez</a> </p> <p class="card-text"><strong>Abstract:</strong></p> The process of generating computer vision is called artificial vision. The artificial vision is a branch of artificial intelligence that allows the obtaining, processing, and analysis of any type of information especially the ones obtained through digital images. Actually the artificial vision is used in manufacturing areas for quality control and production, as these processes can be realized through counting algorithms, positioning, and recognition of objects that can be measured by a single camera (or more). On the other hand, the companies use assembly lines formed by conveyor systems with actuators on them for moving pieces from one location to another in their production. These devices must be previously programmed for their good performance and must have a programmed logic routine. Nowadays the production is the main target of every industry, quality, and the fast elaboration of the different stages and processes in the chain of production of any product or service being offered. The principal base of this project is to program a computer that recognizes geometric figures (circle, square, and triangle) through a camera, each one with a different color and link it with a group of conveyor systems to organize the mentioned figures in cubicles, which differ from one another also by having different colors. 
This project bases on artificial vision, therefore the methodology needed to develop this project must be strict, this one is detailed below: 1. Methodology: 1.1 The software used in this project is QT Creator which is linked with Open CV libraries. Together, these tools perform to realize the respective program to identify colors and forms directly from the camera to the computer. 1.2 Imagery acquisition: To start using the libraries of Open CV is necessary to acquire images, which can be captured by a computer’s web camera or a different specialized camera. 1.3 The recognition of RGB colors is realized by code, crossing the matrices of the captured images and comparing pixels, identifying the primary colors which are red, green, and blue. 1.4 To detect forms it is necessary to realize the segmentation of the images, so the first step is converting the image from RGB to grayscale, to work with the dark tones of the image, then the image is binarized which means having the figure of the image in a white tone with a black background. Finally, we find the contours of the figure in the image to detect the quantity of edges to identify which figure it is. 1.5 After the color and figure have been identified, the program links with the conveyor systems, which through the actuators will classify the figures in their respective cubicles. Conclusions: The Open CV library is a useful tool for projects in which an interface between a computer and the environment is required since the camera obtains external characteristics and realizes any process. With the program for this project any type of assembly line can be optimized because images from the environment can be obtained and the process would be more accurate. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=artificial%20intelligence" title="artificial intelligence">artificial intelligence</a>, <a href="https://publications.waset.org/abstracts/search?q=artificial%20vision" title=" artificial vision"> artificial vision</a>, <a href="https://publications.waset.org/abstracts/search?q=binarized" title=" binarized"> binarized</a>, <a href="https://publications.waset.org/abstracts/search?q=grayscale" title=" grayscale"> grayscale</a>, <a href="https://publications.waset.org/abstracts/search?q=images" title=" images"> images</a>, <a href="https://publications.waset.org/abstracts/search?q=RGB" title=" RGB "> RGB </a> </p> <a href="https://publications.waset.org/abstracts/32096/control-of-belts-for-classification-of-geometric-figures-by-artificial-vision" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/32096.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">382</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">3205</span> Artificial Intelligence and Machine Vision-Based Defect Detection Methodology for Solid Rocket Motor Propellant Grains</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Sandip%20Suman">Sandip Suman</a> </p> <p class="card-text"><strong>Abstract:</strong></p> Mechanical defects (cracks, voids, irregularities) in rocket motor propellant are not new and it is induced due to various reasons, which could be an improper manufacturing process, lot-to-lot variation in chemicals or just the natural aging of the products. 
These defects are normally identified during the examination of radiographic films by quality inspectors. However, a lot of times, these defects are under or over-classified by human inspectors, which leads to unpredictable performance during lot acceptance tests and significant economic loss. The human eye can only visualize larger cracks and defects in the radiographs, and it is almost impossible to visualize every small defect through the human eye. A different artificial intelligence-based machine vision methodology has been proposed in this work to identify and classify the structural defects in the radiographic films of rocket motors with solid propellant. The proposed methodology can extract the features of defects, characterize them, and make intelligent decisions for acceptance or rejection as per the customer requirements. This will automatize the defect detection process during manufacturing with human-like intelligence. It will also significantly reduce production downtime and help to restore processes in the least possible time. The proposed methodology is highly scalable and can easily be transferred to various products and processes. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=artificial%20intelligence" title="artificial intelligence">artificial intelligence</a>, <a href="https://publications.waset.org/abstracts/search?q=machine%20vision" title=" machine vision"> machine vision</a>, <a href="https://publications.waset.org/abstracts/search?q=defect%20detection" title=" defect detection"> defect detection</a>, <a href="https://publications.waset.org/abstracts/search?q=rocket%20motor%20propellant%20grains" title=" rocket motor propellant grains"> rocket motor propellant grains</a> </p> <a href="https://publications.waset.org/abstracts/168782/artificial-intelligence-and-machine-vision-based-defect-detection-methodology-for-solid-rocket-motor-propellant-grains" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/168782.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">110</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">3204</span> Visual Improvement with Low Vision Aids in Children with Stargardt’s Disease</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Anum%20Akhter">Anum Akhter</a>, <a href="https://publications.waset.org/abstracts/search?q=Sumaira%20Altaf"> Sumaira Altaf</a> </p> <p class="card-text"><strong>Abstract:</strong></p> Purpose: To study the effect of low vision devices i.e. telescope and magnifying glasses on distance visual acuity and near visual acuity of children with Stargardt’s disease. Setting: Low vision department, Alshifa Trust Eye Hospital, Rawalpindi, Pakistan. Methods: 52 children having Stargardt’s disease were included in the study. 
All children were diagnosed by pediatrics ophthalmologists. Comprehensive low vision assessment was done by me in Low vision clinic. Visual acuity was measured using ETDRS chart. Refraction and other supplementary tests were performed. Children with Stargardt’s disease were provided with different telescopes and magnifying glasses for improving far vision and near vision. Results: Out of 52 children, 17 children were males and 35 children were females. Distance visual acuity and near visual acuity improved significantly with low vision aid trial. All children showed visual acuity better than 6/19 with a telescope of higher magnification. Improvement in near visual acuity was also significant with magnifying glasses trial. Conclusions: Low vision aids are useful for improvement in visual acuity in children. Children with Stargardt’s disease who are having a problem in education and daily life activities can get help from low vision aids. <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=Stargardt" title="Stargardt">Stargardt</a>, <a href="https://publications.waset.org/abstracts/search?q=s%20disease" title="s disease">s disease</a>, <a href="https://publications.waset.org/abstracts/search?q=low%20vision%20aids" title=" low vision aids"> low vision aids</a>, <a href="https://publications.waset.org/abstracts/search?q=telescope" title=" telescope"> telescope</a>, <a href="https://publications.waset.org/abstracts/search?q=magnifiers" title=" magnifiers"> magnifiers</a> </p> <a href="https://publications.waset.org/abstracts/24382/visual-improvement-with-low-vision-aids-in-children-with-stargardts-disease" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/24382.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">548</span> </span> </div> </div> <div class="card 
paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">3203</span> Artificial Intelligence Created Inventions</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=John%20Goodhue">John Goodhue</a>, <a href="https://publications.waset.org/abstracts/search?q=Xiaonan%20Wei"> Xiaonan Wei</a> </p> <p class="card-text"><strong>Abstract:</strong></p> Current legal decisions and policies regarding the naming as artificial intelligence as inventor are reviewed with emphasis on the recent decisions by the European Patent Office regarding the DABUS inventions holding that an artificial intelligence machine cannot be an inventor. Next, a set of hypotheticals is introduced and examined to better understand how artificial intelligence might be used to create or assist in creating new inventions and how application of existing or proposed changes in the law would affect the ability to protect these inventions including due to restrictions on artificial intelligence for being named as inventors, ownership of inventions made by artificial intelligence, and the effects on legal standards for inventiveness or obviousness. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=Artificial%20intelligence" title="Artificial intelligence">Artificial intelligence</a>, <a href="https://publications.waset.org/abstracts/search?q=innovation" title=" innovation"> innovation</a>, <a href="https://publications.waset.org/abstracts/search?q=invention" title=" invention"> invention</a>, <a href="https://publications.waset.org/abstracts/search?q=patent" title=" patent"> patent</a> </p> <a href="https://publications.waset.org/abstracts/121367/artificial-intelligence-created-inventions" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/121367.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">183</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">3202</span> Optimizing Machine Vision System Setup Accuracy by Six-Sigma DMAIC Approach</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Joseph%20C.%20Chen">Joseph C. Chen</a> </p> <p class="card-text"><strong>Abstract:</strong></p> Machine vision system provides automatic inspection to reduce manufacturing costs considerably. However, only a few principles have been found to optimize machine vision system and help it function more accurately in industrial practice. Mostly, there were complicated and impractical design techniques to improve the accuracy of machine vision system. This paper discusses implementing the Six Sigma Define, Measure, Analyze, Improve, and Control (DMAIC) approach to optimize the setup parameters of machine vision system when it is used as a direct measurement technique. 
This research follows a case study showing how Six Sigma DMAIC methodology has been put into use. <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=DMAIC" title="DMAIC">DMAIC</a>, <a href="https://publications.waset.org/abstracts/search?q=machine%20vision%20system" title=" machine vision system"> machine vision system</a>, <a href="https://publications.waset.org/abstracts/search?q=process%20capability" title=" process capability"> process capability</a>, <a href="https://publications.waset.org/abstracts/search?q=Taguchi%20Parameter%20Design" title=" Taguchi Parameter Design"> Taguchi Parameter Design</a> </p> <a href="https://publications.waset.org/abstracts/68243/optimizing-machine-vision-system-setup-accuracy-by-six-sigma-dmaic-approach" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/68243.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">443</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">3201</span> Artificial Intelligence and Distributed System Computing: Application and Practice in Real Life</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Lai%20Junzhe">Lai Junzhe</a>, <a href="https://publications.waset.org/abstracts/search?q=Wang%20Lihao"> Wang Lihao</a>, <a href="https://publications.waset.org/abstracts/search?q=Burra%20Venkata%20Durga%20Kumar"> Burra Venkata Durga Kumar</a> </p> <p class="card-text"><strong>Abstract:</strong></p> In recent years, due to today's global technological advances, big data and artificial intelligence technologies have been widely used in various industries and fields, playing an important role in reducing costs and increasing 
efficiency. Among them, artificial intelligence has derived another branch in its own continuous progress and the continuous development of computer personnel, namely distributed artificial intelligence computing systems. Distributed AI is a method for solving complex learning, decision-making, and planning problems, characterized by the ability to take advantage of large-scale computation and the spatial distribution of resources, and accordingly, it can handle problems with large data sets. Nowadays, distributed AI is widely used in military, medical, and human daily life and brings great convenience and efficient operation to life. In this paper, we will discuss three areas of distributed AI computing systems in vision processing, blockchain, and smart home to introduce the performance of distributed systems and the role of AI in distributed systems. <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=distributed%20system" title="distributed system">distributed system</a>, <a href="https://publications.waset.org/abstracts/search?q=artificial%20intelligence" title=" artificial intelligence"> artificial intelligence</a>, <a href="https://publications.waset.org/abstracts/search?q=blockchain" title=" blockchain"> blockchain</a>, <a href="https://publications.waset.org/abstracts/search?q=IoT" title=" IoT"> IoT</a>, <a href="https://publications.waset.org/abstracts/search?q=visual%20information%20processing" title=" visual information processing"> visual information processing</a>, <a href="https://publications.waset.org/abstracts/search?q=smart%20home" title=" smart home"> smart home</a> </p> <a href="https://publications.waset.org/abstracts/153089/artificial-intelligence-and-distributed-system-computing-application-and-practice-in-real-life" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/153089.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span 
class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">116</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">3200</span> Design of a Backlight Hyperspectral Imaging System for Enhancing Image Quality in Artificial Vision Food Packaging Online Inspections</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Ferran%20Paul%C3%AD%20Pla">Ferran Paulí Pla</a>, <a href="https://publications.waset.org/abstracts/search?q=Pere%20Palac%C3%ADn%20Farr%C3%A9"> Pere Palacín Farré</a>, <a href="https://publications.waset.org/abstracts/search?q=Albert%20Fornells%20Herrera"> Albert Fornells Herrera</a>, <a href="https://publications.waset.org/abstracts/search?q=Pol%20Toldr%C3%A0%20Fern%C3%A1ndez"> Pol Toldrà Fernández</a> </p> <p class="card-text"><strong>Abstract:</strong></p> Poor image acquisition is limiting the promising growth of industrial vision in food control. In recent years, the food industry has witnessed a significant increase in the implementation of automation in quality control through artificial vision, a trend that continues to grow. During the packaging process, some defects may appear, compromising the proper sealing of the products and diminishing their shelf life, sanitary conditions and overall properties. While failure to detect a defective product leads to major losses, food producers also aim to minimize over-rejection to avoid unnecessary waste. Thus, accuracy in the evaluation of the products is crucial, and, given the large production volumes, even small improvements have a significant impact. Recently, efforts have been focused on maximizing the performance of classification neural networks; nevertheless, their performance is limited by the quality of the input data. 
Monochrome linear backlight systems are most commonly used for online inspections of food packaging thermo-sealing zones. These simple acquisition systems fit the high cadence of the production lines imposed by the market demand. Nevertheless, they provide a limited amount of data, which negatively impacts classification algorithm training. A desired situation would be one where data quality is maximized in terms of obtaining the key information to detect defects while maintaining a fast working pace. This work presents a backlight hyperspectral imaging system designed and implemented replicating an industrial environment to better understand the relationship between visual data quality and spectral illumination range for a variety of packed food products. Furthermore, results led to the identification of advantageous spectral bands that significantly enhance image quality, providing clearer detection of defects. <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=artificial%20vision" title="artificial vision">artificial vision</a>, <a href="https://publications.waset.org/abstracts/search?q=food%20packaging" title=" food packaging"> food packaging</a>, <a href="https://publications.waset.org/abstracts/search?q=hyperspectral%20imaging" title=" hyperspectral imaging"> hyperspectral imaging</a>, <a href="https://publications.waset.org/abstracts/search?q=image%20acquisition" title=" image acquisition"> image acquisition</a>, <a href="https://publications.waset.org/abstracts/search?q=quality%20control" title=" quality control"> quality control</a> </p> <a href="https://publications.waset.org/abstracts/191304/design-of-a-backlight-hyperspectral-imaging-system-for-enhancing-image-quality-in-artificial-vision-food-packaging-online-inspections" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/191304.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span 
class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">30</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">3199</span> Analysis of Histogram Asymmetry for Waste Recognition</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Janusz%20Bobulski">Janusz Bobulski</a>, <a href="https://publications.waset.org/abstracts/search?q=Kamila%20Pasternak"> Kamila Pasternak</a> </p> <p class="card-text"><strong>Abstract:</strong></p> Despite many years of effort and research, the problem of waste management is still current. So far, no fully effective waste management system has been developed. Many programs and projects improve statistics on the percentage of waste recycled every year. In these efforts, it is worth using modern Computer Vision techniques supported by artificial intelligence. In the article, we present a method of identifying plastic waste based on the asymmetry analysis of the histogram of the image containing the waste. The method is simple but effective (94%), which allows it to be implemented on devices with low computing power, in particular on microcomputers. Such devices will be used both at home and in waste sorting plants. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=waste%20management" title="waste management">waste management</a>, <a href="https://publications.waset.org/abstracts/search?q=environmental%20protection" title=" environmental protection"> environmental protection</a>, <a href="https://publications.waset.org/abstracts/search?q=image%20processing" title=" image processing"> image processing</a>, <a href="https://publications.waset.org/abstracts/search?q=computer%20vision" title=" computer vision"> computer vision</a> </p> <a href="https://publications.waset.org/abstracts/155242/analysis-of-histogram-asymmetry-for-waste-recognition" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/155242.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">127</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">3198</span> ANAC-id - Facial Recognition to Detect Fraud</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Giovanna%20Borges%20Bottino">Giovanna Borges Bottino</a>, <a href="https://publications.waset.org/abstracts/search?q=Luis%20Felipe%20Freitas%20do%20Nascimento%20Alves%20Teixeira"> Luis Felipe Freitas do Nascimento Alves Teixeira</a> </p> <p class="card-text"><strong>Abstract:</strong></p> This article aims to present a case study of the National Civil Aviation Agency (ANAC) in Brazil, ANAC-id. ANAC-id is the artificial intelligence algorithm developed for image analysis that recognizes standard images of unobstructed and uprighted face without sunglasses, allowing to identify potential inconsistencies. 
It combines YOLO architecture and 3 libraries in python - face recognition, face comparison, and deep face, providing robust analysis with high level of accuracy. <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=artificial%20intelligence" title="artificial intelligence">artificial intelligence</a>, <a href="https://publications.waset.org/abstracts/search?q=deepface" title=" deepface"> deepface</a>, <a href="https://publications.waset.org/abstracts/search?q=face%20compare" title=" face compare"> face compare</a>, <a href="https://publications.waset.org/abstracts/search?q=face%20recognition" title=" face recognition"> face recognition</a>, <a href="https://publications.waset.org/abstracts/search?q=YOLO" title=" YOLO"> YOLO</a>, <a href="https://publications.waset.org/abstracts/search?q=computer%20vision" title=" computer vision"> computer vision</a> </p> <a href="https://publications.waset.org/abstracts/148459/anac-id-facial-recognition-to-detect-fraud" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/148459.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">163</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">3197</span> Description of the Non-Iterative Learning Algorithm of Artificial Neuron</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=B.%20S.%20Akhmetov">B. S. Akhmetov</a>, <a href="https://publications.waset.org/abstracts/search?q=S.%20T.%20Akhmetova"> S. T. Akhmetova</a>, <a href="https://publications.waset.org/abstracts/search?q=A.%20I.%20Ivanov"> A. I. Ivanov</a>, <a href="https://publications.waset.org/abstracts/search?q=T.%20S.%20Kartbayev"> T. S. 
Kartbayev</a>, <a href="https://publications.waset.org/abstracts/search?q=A.%20Y.%20Malygin"> A. Y. Malygin</a> </p> <p class="card-text"><strong>Abstract:</strong></p> The problem of training of a network of artificial neurons in biometric appendices is that this process has to be completely automatic, i.e. the person operator should not participate in it. Therefore, this article discusses the issues of training the network of artificial neurons and the description of the non-iterative learning algorithm of artificial neuron. <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=artificial%20neuron" title="artificial neuron">artificial neuron</a>, <a href="https://publications.waset.org/abstracts/search?q=biometrics" title=" biometrics"> biometrics</a>, <a href="https://publications.waset.org/abstracts/search?q=biometrical%20applications" title=" biometrical applications"> biometrical applications</a>, <a href="https://publications.waset.org/abstracts/search?q=learning%20of%20neuron" title=" learning of neuron"> learning of neuron</a>, <a href="https://publications.waset.org/abstracts/search?q=non-iterative%20algorithm" title=" non-iterative algorithm"> non-iterative algorithm</a> </p> <a href="https://publications.waset.org/abstracts/19446/description-of-the-non-iterative-learning-algorithm-of-artificial-neuron" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/19446.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">501</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">3196</span> A Systematic Categorization of Arguments against the Vision Zero Goal: A Literature Review</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a 
href="https://publications.waset.org/abstracts/search?q=Henok%20Girma%20Abebe">Henok Girma Abebe</a> </p> <p class="card-text"><strong>Abstract:</strong></p> The Vision Zero is a long-term goal of preventing all road traffic fatalities and serious injuries which was first adopted in Sweden in 1997. It is based on the assumption that death and serious injury in the road system is morally unacceptable. In order to approach this end, vision zero has put in place strategies that are radically different from the traditional safety work. The vision zero, for instance, promoted the adoption of the best available technology to promote safety, and placed the ultimate responsibility for traffic safety on system designers. Despite Vision Zero’s moral appeal and its expansion to different safety areas and also parts of the world, important philosophical concerns related to the adoption and implementation of the vision zero remain to be addressed. Moreover, the vision zero goal has been criticized on different grounds. The aim of this paper is to identify and systematically categorize criticisms that have been put forward against vision zero. The findings of the paper are solely based on a critical analysis of secondary sources and snowball method is employed to identify the relevant philosophical and empirical literatures. Two general categories of criticisms on the vision zero goal are identified. The first category consists of criticisms that target the setting of vision zero as a ‘goal’ and some of the basic assumptions upon which the goal is based. Among others, the goal of achieving zero fatalities and serious injuries, together with vision zero’s lexicographical prioritization of safety has been criticized as unrealistic. The second category consists of criticisms that target the strategies put in place to achieve the goal of zero fatalities and serious injuries. 
For instance, Vision zero’s responsibility ascription for road safety and its rejection of cost-benefit analysis in the formulation and adoption of safety measures has both been criticized as counterproductive. In this category also falls the criticism that Vision Zero safety measures tend to be too paternalistic. Significant improvements have been recorded in road safety work since the adoption of vision zero, however, for the vision zero to even succeed more, it is important that issues and criticisms of philosophical nature associated with it are identified and critically dealt with. <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=criticisms" title="criticisms">criticisms</a>, <a href="https://publications.waset.org/abstracts/search?q=systems%20approach" title=" systems approach"> systems approach</a>, <a href="https://publications.waset.org/abstracts/search?q=traffic%20safety" title=" traffic safety"> traffic safety</a>, <a href="https://publications.waset.org/abstracts/search?q=vision%20zero" title=" vision zero"> vision zero</a> </p> <a href="https://publications.waset.org/abstracts/97167/a-systematic-categorization-of-arguments-against-the-vision-zero-goal-a-literature-review" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/97167.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">314</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">3195</span> Analysis of Q-Learning on Artificial Neural Networks for Robot Control Using Live Video Feed</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Nihal%20Murali">Nihal Murali</a>, <a 
href="https://publications.waset.org/abstracts/search?q=Kunal%20Gupta"> Kunal Gupta</a>, <a href="https://publications.waset.org/abstracts/search?q=Surekha%20Bhanot"> Surekha Bhanot</a> </p> <p class="card-text"><strong>Abstract:</strong></p> Training of artificial neural networks (ANNs) using reinforcement learning (RL) techniques is being widely discussed in the robot learning literature. The high model complexity of ANNs along with the model-free nature of RL algorithms provides a desirable combination for many robotics applications. There is a huge need for algorithms that generalize using raw sensory inputs, such as vision, without any hand-engineered features or domain heuristics. In this paper, the standard control problem of line following robot was used as a test-bed, and an ANN controller for the robot was trained on images from a live video feed using Q-learning. A virtual agent was first trained in simulation environment and then deployed onto a robot’s hardware. The robot successfully learns to traverse a wide range of curves and displays excellent generalization ability. Qualitative analysis of the evolution of policies, performance and weights of the network provide insights into the nature and convergence of the learning algorithm. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=artificial%20neural%20networks" title="artificial neural networks">artificial neural networks</a>, <a href="https://publications.waset.org/abstracts/search?q=q-learning" title=" q-learning"> q-learning</a>, <a href="https://publications.waset.org/abstracts/search?q=reinforcement%20learning" title=" reinforcement learning"> reinforcement learning</a>, <a href="https://publications.waset.org/abstracts/search?q=robot%20learning" title=" robot learning"> robot learning</a> </p> <a href="https://publications.waset.org/abstracts/70136/analysis-of-q-learning-on-artificial-neural-networks-for-robot-control-using-live-video-feed" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/70136.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">377</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">3194</span> Artificial Intelligence and Personhood: An African Perspective</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Meshandren%20Naidoo">Meshandren Naidoo</a>, <a href="https://publications.waset.org/abstracts/search?q=Amy%20Gooden"> Amy Gooden</a> </p> <p class="card-text"><strong>Abstract:</strong></p> The concept of personhood extending from the moral status of an artificial intelligence system has been explored – but predominantly from a Western conception of personhood. African personhood, however, is distinctly different from Western personhood in that communitarianism is central rather than individualism. Given the decolonization projects happening in Africa, it’s paramount to consider these views. 
This research demonstrates that the African notion of personhood may extend for an artificial intelligent system where the pre-conditions are met. <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=artificial%20intelligence" title="artificial intelligence">artificial intelligence</a>, <a href="https://publications.waset.org/abstracts/search?q=ethics" title=" ethics"> ethics</a>, <a href="https://publications.waset.org/abstracts/search?q=law" title=" law"> law</a>, <a href="https://publications.waset.org/abstracts/search?q=personhood" title=" personhood"> personhood</a>, <a href="https://publications.waset.org/abstracts/search?q=policy" title=" policy"> policy</a> </p> <a href="https://publications.waset.org/abstracts/153439/artificial-intelligence-and-personhood-an-african-perspective" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/153439.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">140</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">3193</span> Artificial Intelligence and Police</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Mehrnoosh%20Abouzari">Mehrnoosh Abouzari</a> </p> <p class="card-text"><strong>Abstract:</strong></p> Artificial intelligence has covered all areas of human life and has helped or replaced many jobs. One of the areas of application of artificial intelligence in the police is to detect crime, identify the accused or victim and prove the crime. It will play an effective role in implementing preventive justice and creating security in the community, and improving judicial decisions. 
This will help improve the performance of the police, increase the accuracy of criminal investigations, and play an effective role in preventing crime and high-risk behaviors in society. This article presents and analyzes the capabilities and capacities of artificial intelligence in police and similar examples used worldwide to prove the necessity of using artificial intelligence in the police. The main topics discussed include the performance of artificial intelligence in crime detection and prediction, the risk capacity of criminals and the ability to apply arbitrary institutions, and the introduction of artificial intelligence programs implemented worldwide in the field of criminal investigation for police. <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=police" title="police">police</a>, <a href="https://publications.waset.org/abstracts/search?q=artificial%20intelligence" title=" artificial intelligence"> artificial intelligence</a>, <a href="https://publications.waset.org/abstracts/search?q=forecasting" title=" forecasting"> forecasting</a>, <a href="https://publications.waset.org/abstracts/search?q=prevention" title=" prevention"> prevention</a>, <a href="https://publications.waset.org/abstracts/search?q=software" title=" software"> software</a> </p> <a href="https://publications.waset.org/abstracts/141793/artificial-intelligence-and-police" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/141793.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">213</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">3192</span> The Role of Artificial Intelligence in Concrete Constructions</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> 
<a href="https://publications.waset.org/abstracts/search?q=Ardalan%20Tofighi%20Soleimandarabi">Ardalan Tofighi Soleimandarabi</a> </p> <p class="card-text"><strong>Abstract:</strong></p> Artificial intelligence has revolutionized the concrete construction industry and improved processes by increasing efficiency, accuracy, and sustainability. This article examines the applications of artificial intelligence in predicting the compressive strength of concrete, optimizing mixing plans, and improving structural health monitoring systems. Artificial intelligence-based models, such as artificial neural networks (ANN) and combined machine learning techniques, have shown better performance than traditional methods in predicting concrete properties. In addition, artificial intelligence systems have made it possible to improve quality control and real-time monitoring of structures, which helps in preventive maintenance and increases the life of infrastructure. Also, the use of artificial intelligence plays an effective role in sustainable construction by optimizing material consumption and reducing waste. Although the implementation of artificial intelligence is associated with challenges such as high initial costs and the need for specialized training, it will create a smarter, more sustainable, and more affordable future for concrete structures. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=artificial%20intelligence" title="artificial intelligence">artificial intelligence</a>, <a href="https://publications.waset.org/abstracts/search?q=concrete%20construction" title=" concrete construction"> concrete construction</a>, <a href="https://publications.waset.org/abstracts/search?q=compressive%20strength%20prediction" title=" compressive strength prediction"> compressive strength prediction</a>, <a href="https://publications.waset.org/abstracts/search?q=structural%20health%20monitoring" title=" structural health monitoring"> structural health monitoring</a>, <a href="https://publications.waset.org/abstracts/search?q=stability" title=" stability"> stability</a> </p> <a href="https://publications.waset.org/abstracts/198833/the-role-of-artificial-intelligence-in-concrete-constructions" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/198833.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">2</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">3191</span> Data Mining of Students' Performance Using Artificial Neural Network: Turkish Students as a Case Study</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Samuel%20Nii%20Tackie">Samuel Nii Tackie</a>, <a href="https://publications.waset.org/abstracts/search?q=Oyebade%20K.%20Oyedotun"> Oyebade K. Oyedotun</a>, <a href="https://publications.waset.org/abstracts/search?q=Ebenezer%20O.%20Olaniyi"> Ebenezer O. 
Olaniyi</a>, <a href="https://publications.waset.org/abstracts/search?q=Adnan%20Khashman"> Adnan Khashman</a> </p> <p class="card-text"><strong>Abstract:</strong></p> Artificial neural networks have been used in different fields of artificial intelligence, and more specifically in machine learning. Although other machine learning options are feasible in most situations, the ease with which neural networks lend themselves to different problems which include pattern recognition, image compression, classification, computer vision, regression etc. has earned it a remarkable place in the machine learning field. This research exploits neural networks as a data mining tool in predicting the number of times a student repeats a course, considering some attributes relating to the course itself, the teacher, and the particular student. Neural networks were used in this work to map the relationship between some attributes related to students’ course assessment and the number of times a student will possibly repeat a course before he passes. It is the hope that the possibility to predict students’ performance from such complex relationships can help facilitate the fine-tuning of academic systems and policies implemented in learning environments. To validate the power of neural networks in data mining, Turkish students’ performance database has been used; feedforward and radial basis function networks were trained for this task; and the performances obtained from these networks evaluated in consideration of achieved recognition rates and training time. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=artificial%20neural%20network" title="artificial neural network">artificial neural network</a>, <a href="https://publications.waset.org/abstracts/search?q=data%20mining" title=" data mining"> data mining</a>, <a href="https://publications.waset.org/abstracts/search?q=classification" title=" classification"> classification</a>, <a href="https://publications.waset.org/abstracts/search?q=students%E2%80%99%20evaluation" title=" students’ evaluation"> students’ evaluation</a> </p> <a href="https://publications.waset.org/abstracts/25099/data-mining-of-students-performance-using-artificial-neural-network-turkish-students-as-a-case-study" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/25099.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">620</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">3190</span> Applications of Artificial Neural Networks in Civil Engineering </h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Naci%20B%C3%BCy%C3%BCkkarac%C4%B1%C4%9Fan">Naci Büyükkaracığan</a> </p> <p class="card-text"><strong>Abstract:</strong></p> Artificial neural networks (ANN) is an electrical model based on the human brain nervous system and working principle. Artificial neural networks have been the subject of an active field of research that has matured greatly over the past 55 years. ANN now is used in many fields. But, it has been viewed that artificial neural networks give better results in particular optimization and control systems. 
There are requirements of optimization and control system in many of the area forming the subject of civil engineering applications. In this study, the first artificial intelligence systems are widely used in the solution of civil engineering systems were examined with the basic principles and technical aspects. Finally, the literature reviews for applications in the field of civil engineering were conducted and also artificial intelligence techniques were informed about the study and its results. <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=artificial%20neural%20networks" title="artificial neural networks">artificial neural networks</a>, <a href="https://publications.waset.org/abstracts/search?q=civil%20engineering" title=" civil engineering"> civil engineering</a>, <a href="https://publications.waset.org/abstracts/search?q=Fuzzy%20logic" title=" Fuzzy logic"> Fuzzy logic</a>, <a href="https://publications.waset.org/abstracts/search?q=statistics" title=" statistics"> statistics</a> </p> <a href="https://publications.waset.org/abstracts/29908/applications-of-artificial-neural-networks-in-civil-engineering" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/29908.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">421</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">3189</span> VisioGuide: An Artificial Intelligence for Visually Impaired</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Shashanka%20R.">Shashanka R.</a>, <a href="https://publications.waset.org/abstracts/search?q=Anisha%20Devi"> Anisha Devi</a>, <a 
href="https://publications.waset.org/abstracts/search?q=Lakith%20Gowda%20K."> Lakith Gowda K.</a>, <a href="https://publications.waset.org/abstracts/search?q=Kushal%20S."> Kushal S.</a>, <a href="https://publications.waset.org/abstracts/search?q=Ravi%20Kumar%20V."> Ravi Kumar V.</a> </p> <p class="card-text"><strong>Abstract:</strong></p> In today's rapidly changing technological environment, there is a rising acknowledgment of the need for independent living for visually impaired people who confront major social inclusion issues. They may need human support to navigate these situations. Visually impaired people frequently lack access to critical information about their surroundings because visual information is crucial to most daily actions. Support for visually impaired people is on the rise; however, some factors bring about disparity in this support. Most of the recent developments in assistive technology create a platform to not only address this particular gap but also enhance, especially the support for the visually impaired. This research intends to develop an application for the Android platform to encourage self-reliance and social inclusion of the visually impaired. It uses current communication technologies, artificial intelligence, visual recognition, and machine learning. The application includes features like voice command, OCR, navigation, emergency call, screen reader, and connectivity to other service apps such as Uber, among others. Moreover, it also works in Kannada to encourage its usage. The primary objective is to promote more freedom and inclusion among the sight impaired. This smart solution seeks to enhance the standards of living for blind or vision impaired people and motivate their active participation in modern technological advances by encouraging them to interact with the surroundings through the different features of the app. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=AI" title="AI">AI</a>, <a href="https://publications.waset.org/abstracts/search?q=NLP" title=" NLP"> NLP</a>, <a href="https://publications.waset.org/abstracts/search?q=computer%20vision" title=" computer vision"> computer vision</a>, <a href="https://publications.waset.org/abstracts/search?q=assistive%20technology" title=" assistive technology"> assistive technology</a> </p> <a href="https://publications.waset.org/abstracts/198548/visioguide-an-artificial-intelligence-for-visually-impaired" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/198548.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">2</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">3188</span> The Role of Synthetic Data in Aerial Object Detection</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Ava%20Dodd">Ava Dodd</a>, <a href="https://publications.waset.org/abstracts/search?q=Jonathan%20Adams"> Jonathan Adams</a> </p> <p class="card-text"><strong>Abstract:</strong></p> The purpose of this study is to explore the characteristics of developing a machine learning application using synthetic data. The study is structured to develop the application for the purpose of deploying the computer vision model. The findings discuss the realities of attempting to develop a computer vision model for practical purpose, and detail the processes, tools, and techniques that were used to meet accuracy requirements. The research reveals that synthetic data represents another variable that can be adjusted to improve the performance of a computer vision model. 
Further, a suite of tools and tuning recommendations are provided. <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=computer%20vision" title="computer vision">computer vision</a>, <a href="https://publications.waset.org/abstracts/search?q=machine%20learning" title=" machine learning"> machine learning</a>, <a href="https://publications.waset.org/abstracts/search?q=synthetic%20data" title=" synthetic data"> synthetic data</a>, <a href="https://publications.waset.org/abstracts/search?q=YOLOv4" title=" YOLOv4"> YOLOv4</a> </p> <a href="https://publications.waset.org/abstracts/139194/the-role-of-synthetic-data-in-aerial-object-detection" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/139194.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">231</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">3187</span> Convolutional Neural Network and LSTM Applied to Abnormal Behaviour Detection from Highway Footage</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Rafael%20Marinho%20de%20Andrade">Rafael Marinho de Andrade</a>, <a href="https://publications.waset.org/abstracts/search?q=Elcio%20Hideti%20Shiguemori"> Elcio Hideti Shiguemori</a>, <a href="https://publications.waset.org/abstracts/search?q=Rafael%20Duarte%20Coelho%20dos%20Santos"> Rafael Duarte Coelho dos Santos</a> </p> <p class="card-text"><strong>Abstract:</strong></p> Relying on computer vision, many clever things are possible in order to make the world safer and optimized on resource management, especially considering time and attention as manageable resources, once the modern world is very abundant in cameras from 
inside our pockets to above our heads while crossing the streets. Thus, automated solutions based on computer vision techniques to detect, react, or even prevent relevant events such as robbery, car crashes and traffic jams can be accomplished and implemented for the sake of both logistical and surveillance improvements. In this paper, we present an approach for vehicles’ abnormal behaviors detection from highway footages, in which the vectorial data of the vehicles’ displacement are extracted directly from surveillance cameras footage through object detection and tracking with a deep convolutional neural network and inserted into a long-short term memory neural network for behavior classification. The results show that the classifications of behaviors are consistent and the same principles may be applied to other trackable objects and scenarios as well. <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=artificial%20intelligence" title="artificial intelligence">artificial intelligence</a>, <a href="https://publications.waset.org/abstracts/search?q=behavior%20detection" title=" behavior detection"> behavior detection</a>, <a href="https://publications.waset.org/abstracts/search?q=computer%20vision" title=" computer vision"> computer vision</a>, <a href="https://publications.waset.org/abstracts/search?q=convolutional%20neural%20networks" title=" convolutional neural networks"> convolutional neural networks</a>, <a href="https://publications.waset.org/abstracts/search?q=LSTM" title=" LSTM"> LSTM</a>, <a href="https://publications.waset.org/abstracts/search?q=highway%20footage" title=" highway footage"> highway footage</a> </p> <a href="https://publications.waset.org/abstracts/144246/convolutional-neural-network-and-lstm-applied-to-abnormal-behaviour-detection-from-highway-footage" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/144246.pdf" target="_blank" class="btn 
btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">172</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">3186</span> I, A.I. An Interdisciplinary Exploration of Artificial Consciousness</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Juh%C3%A1sz%20D%C3%A1vid">Juhász Dávid</a> </p> <p class="card-text"><strong>Abstract:</strong></p> This paper examines the nature of artificial consciousness through an interdisciplinary lens, integrating philosophy, epistemology, and computational theories. Beginning with presocratic thought, such as Protagoras’ relativism and Gorgias’ rhetoric, it contextualizes the epistemological implications of artificial intelligence as inherently subjective, attributing grave importance to this subjectivity. The paper draws parallels between Plato’s cave dwellers and AI systems, arguing that both rely on lossy representations of the world, raising questions about their true understanding of reality. Cartesian and Hegelian frameworks are explored to distinguish between weak and strong artificial intelligence, emphasizing embodied cognition and the moral obligations tied to emergent artificial consciousness. The discussion extends to quantum computing, panpsychism, and the potential of artificial minds to reshape our perception of time and existence. By critically analyzing these perspectives, the paper advocates for a nuanced understanding of artificial consciousness and its ethical, epistemological, and societal implications. It invites readers to reconsider humanity’s evolving relationship with intelligence and sentience. <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=A.I." 
title="A.I.">A.I.</a>, <a href="https://publications.waset.org/abstracts/search?q=philosophy%20of%20mind" title=" philosophy of mind"> philosophy of mind</a>, <a href="https://publications.waset.org/abstracts/search?q=artificial%20consciousness" title=" artificial consciousness"> artificial consciousness</a>, <a href="https://publications.waset.org/abstracts/search?q=cognitive%20computing" title=" cognitive computing"> cognitive computing</a> </p> <a href="https://publications.waset.org/abstracts/198995/i-ai-an-interdisciplinary-exploration-of-artificial-consciousness" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/198995.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">1</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">3185</span> Functional Vision of Older People in Galician Nursing Homes</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=C.%20V%C3%A1zquez">C. Vázquez</a>, <a href="https://publications.waset.org/abstracts/search?q=L.%20M.%20Gigirey"> L. M. Gigirey</a>, <a href="https://publications.waset.org/abstracts/search?q=C.%20P.%20del%20Oro"> C. P. del Oro</a>, <a href="https://publications.waset.org/abstracts/search?q=S.%20Seoane"> S. Seoane </a> </p> <p class="card-text"><strong>Abstract:</strong></p> Early detection of visual problems plays a key role in the aging process. However, although vision problems are common among older people, the percentage of aging people who perform regular optometric exams is low. In fact, uncorrected refractive errors are one of the main causes of visual impairment in this group of the population. 
Purpose: To evaluate functional vision of older residents in order to show the urgent need of visual screening programs in Galician nursing homes. Methodology: We examined 364 older adults aged 65 years and over. To measure vision of the daily living, we tested distance and near presenting visual acuity (binocular visual acuity with habitual correction if worn, directional E-Snellen). Presenting near vision was tested at the usual working distance. We defined visual impairment (distance and near) as a presenting visual acuity less than 0.3. Exclusion criteria included immobilized residents unable to reach the USC Dual Sensory Loss Unit for visual screening. Association between categorical variables was performed using chi-square tests. We used Pearson and Spearman correlation tests and the variance analysis to determine differences between groups of interest. Results: 23,1% of participants have visual impairment for distance vision and 16,4% for near vision. The percentage of residents with far and near visual impairment reaches 8,2%. As expected, prevalence of visual impairment increases with age. No differences exist with regard to the level of functional vision between genders. Differences exist between age groups with respect to distance vision, but not in case of near vision. Conclusion: prevalence of visual impairment is high among the older people tested in this pilot study. This means a high percentage of older people with limitations in their daily life activities. It is necessary to develop an effective vision screening program for early detection of vision problems in Galician nursing homes. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=functional%20vision" title="functional vision">functional vision</a>, <a href="https://publications.waset.org/abstracts/search?q=elders" title=" elders"> elders</a>, <a href="https://publications.waset.org/abstracts/search?q=aging" title=" aging"> aging</a>, <a href="https://publications.waset.org/abstracts/search?q=nursing%20homes" title=" nursing homes"> nursing homes</a> </p> <a href="https://publications.waset.org/abstracts/17989/functional-vision-of-older-people-in-galician-nursing-homes" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/17989.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">414</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">3184</span> Artificial Intelligence for Safety Related Aviation Incident and Accident Investigation Scenarios</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Bernabeo%20R.%20Alberto">Bernabeo R. Alberto</a> </p> <p class="card-text"><strong>Abstract:</strong></p> With the tremendous improvements in the processing power of computers, the possibilities of artificial intelligence will increasingly be used in aviation and make autonomous flights, preventive maintenance, ATM (Air Traffic Management) optimization, pilots, cabin crew, ground staff, and airport staff training possible in a cost-saving, less time-consuming and less polluting way. 
Through the use of artificial intelligence, we foresee an interviewing scenario where the interviewee will interact with the artificial intelligence tool to contextualize the character and the necessary information in a way that aligns reasonably with the character and the scenario. We are creating simulated scenarios connected with either an aviation incident or accident to enhance also the training of future accident/incident investigators integrating artificial intelligence and augmented reality tools. The project's goal is to improve the learning and teaching scenario through academic and professional expertise in aviation and in the artificial intelligence field. Thus, we intend to contribute to the needed high innovation capacity, skills, and training development and management of artificial intelligence, supported by appropriate regulations and attention to ethical problems. <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=artificial%20intelligence" title="artificial intelligence">artificial intelligence</a>, <a href="https://publications.waset.org/abstracts/search?q=aviation%20accident" title=" aviation accident"> aviation accident</a>, <a href="https://publications.waset.org/abstracts/search?q=aviation%20incident" title=" aviation incident"> aviation incident</a>, <a href="https://publications.waset.org/abstracts/search?q=risk" title=" risk"> risk</a>, <a href="https://publications.waset.org/abstracts/search?q=safety" title=" safety"> safety</a> </p> <a href="https://publications.waset.org/abstracts/191510/artificial-intelligence-for-safety-related-aviation-incident-and-accident-investigation-scenarios" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/191510.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">30</span> </span> </div> </div> 
<div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">3183</span> Amazon and Its AI Features</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Leen%20Sulaimani">Leen Sulaimani</a>, <a href="https://publications.waset.org/abstracts/search?q=Maryam%20Hafiz"> Maryam Hafiz</a>, <a href="https://publications.waset.org/abstracts/search?q=Naba%20Ali"> Naba Ali</a>, <a href="https://publications.waset.org/abstracts/search?q=Roba%20Alsharif"> Roba Alsharif</a> </p> <p class="card-text"><strong>Abstract:</strong></p> One of Amazon’s most crucial online systems is artificial intelligence. Amazon would not have a worldwide successful online store, an easy and secure way of payment, and other services if it weren’t for artificial intelligence and machine learning. Amazon uses AI to expand its operations and enhance them by upgrading the website daily; having a strong base of artificial intelligence in a worldwide successful business can improve marketing, decision-making, feedback, and more qualities. Aiming to have a rational AI system in one’s business should be the start of any process; that is why Amazon is fortunate that they keep taking care of the base of their business by using modern artificial intelligence, making sure that it is stable, reaching their organizational goals, and will continue to thrive more each and every day. Artificial intelligence is used daily in our current world and is still being amplified more each day to reach consumer satisfaction and company short and long-term goals. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=artificial%20intelligence" title="artificial intelligence">artificial intelligence</a>, <a href="https://publications.waset.org/abstracts/search?q=Amazon" title=" Amazon"> Amazon</a>, <a href="https://publications.waset.org/abstracts/search?q=business" title=" business"> business</a>, <a href="https://publications.waset.org/abstracts/search?q=customer" title=" customer"> customer</a>, <a href="https://publications.waset.org/abstracts/search?q=decision%20making" title=" decision making"> decision making</a> </p> <a href="https://publications.waset.org/abstracts/167535/amazon-and-its-ai-features" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/167535.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">116</span> </span> </div> </div> <div class="card paper-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">3182</span> Role of Vision Centers in Eliminating Avoidable Blindness Caused Due to Uncorrected Refractive Error in Rural South India</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/abstracts/search?q=Ranitha%20Guna%20Selvi%20D">Ranitha Guna Selvi D</a>, <a href="https://publications.waset.org/abstracts/search?q=Ramakrishnan%20R"> Ramakrishnan R</a>, <a href="https://publications.waset.org/abstracts/search?q=Mohideen%20Abdul%20Kader"> Mohideen Abdul Kader</a> </p> <p class="card-text"><strong>Abstract:</strong></p> Purpose: To study the role of Vision centers in managing preventable blindness through refractive error correction in Rural South India. 
Methods: A retrospective analysis of patients attending 15 Vision centers in Rural South India from a period of January 2021 to December 2021 was done. Medical records of 1,08,581 patients both new and reviewed, 79,562 newly registered patients and 29,019 review patients from 15 Vision centers were included for data analysis. All the patients registered at the vision center underwent basic eye examination, including visual acuity, IOP measurement, Slit-lamp examination, retinoscopy, Fundus examination etc. Results: A total of 1,08,581 patients were included in the study. Of the total 1,08,581 patients, 79,562 were newly registered patients at Vision center and 29,019 were review patients. Males were 52,201 (48.1%) and Females were 56,308 (51.9%) among them. The mean age of all examined patients was 41.03 ± 20.9 years (Standard deviation) and ranged from 01 – 113 years. Presenting mean visual acuity was 0.31 ± 0.5 in the right eye and 0.31 ± 0.4 in the left eye. Of the 1,08,581 patients, 22,770 patients had refractive error in right eye and 22,721 patients had uncorrected refractive error in left eye. Glass prescription was given to 17,178 (15.8%) patients. 8,109 (7.5%) patients were referred to the base hospital for specialty clinic expert opinion or for cataract surgery. Conclusion: Vision center utilizing teleconsultation for comprehensive eye screening unit is a very effective tool in reducing the avoidable visual impairment caused due to uncorrected refractive error. Vision Centre model is believed to be efficient as it facilitates early detection and management of uncorrected refractive errors. 
<p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/abstracts/search?q=refractive%20error" title="refractive error">refractive error</a>, <a href="https://publications.waset.org/abstracts/search?q=uncorrected%20refractive%20error" title=" uncorrected refractive error"> uncorrected refractive error</a>, <a href="https://publications.waset.org/abstracts/search?q=vision%20center" title=" vision center"> vision center</a>, <a href="https://publications.waset.org/abstracts/search?q=vision%20technician" title=" vision technician"> vision technician</a>, <a href="https://publications.waset.org/abstracts/search?q=teleconsultation" title=" teleconsultation"> teleconsultation</a> </p> <a href="https://publications.waset.org/abstracts/146361/role-of-vision-centers-in-eliminating-avoidable-blindness-caused-due-to-uncorrected-refractive-error-in-rural-south-india" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/abstracts/146361.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">151</span> </span> </div> </div> <ul class="pagination"> <li class="page-item disabled"><span class="page-link">‹</span></li> <li class="page-item active"><span class="page-link">1</span></li> <li class="page-item"><a class="page-link" href="https://publications.waset.org/abstracts/search?q=artificial%20vision&page=2">2</a></li> <li class="page-item"><a class="page-link" href="https://publications.waset.org/abstracts/search?q=artificial%20vision&page=3">3</a></li> <li class="page-item"><a class="page-link" href="https://publications.waset.org/abstracts/search?q=artificial%20vision&page=4">4</a></li> <li class="page-item"><a class="page-link" href="https://publications.waset.org/abstracts/search?q=artificial%20vision&page=5">5</a></li> <li class="page-item"><a class="page-link" 
href="https://publications.waset.org/abstracts/search?q=artificial%20vision&page=6">6</a></li> <li class="page-item"><a class="page-link" href="https://publications.waset.org/abstracts/search?q=artificial%20vision&page=7">7</a></li> <li class="page-item"><a class="page-link" href="https://publications.waset.org/abstracts/search?q=artificial%20vision&page=8">8</a></li> <li class="page-item"><a class="page-link" href="https://publications.waset.org/abstracts/search?q=artificial%20vision&page=9">9</a></li> <li class="page-item"><a class="page-link" href="https://publications.waset.org/abstracts/search?q=artificial%20vision&page=10">10</a></li> <li class="page-item disabled"><span class="page-link">...</span></li> <li class="page-item"><a class="page-link" href="https://publications.waset.org/abstracts/search?q=artificial%20vision&page=107">107</a></li> <li class="page-item"><a class="page-link" href="https://publications.waset.org/abstracts/search?q=artificial%20vision&page=108">108</a></li> <li class="page-item"><a class="page-link" href="https://publications.waset.org/abstracts/search?q=artificial%20vision&page=2" rel="next">›</a></li> </ul> </div> </main> <footer> <div id="infolinks" class="pt-3 pb-2"> <div class="container"> <div style="background-color:#f5f5f5;" class="p-3"> <div class="row"> <div class="col-md-2"> <ul class="list-unstyled"> About <li><a href="https://waset.org/page/support">About Us</a></li> <li><a href="https://waset.org/page/support#legal-information">Legal</a></li> <li><a target="_blank" rel="nofollow" href="https://publications.waset.org/static/files/WASET-16th-foundational-anniversary.pdf">WASET celebrates its 16th foundational anniversary</a></li> </ul> </div> <div class="col-md-2"> <ul class="list-unstyled"> Account <li><a href="https://waset.org/profile">My Account</a></li> </ul> </div> <div class="col-md-2"> <ul class="list-unstyled"> Explore <li><a href="https://waset.org/disciplines">Disciplines</a></li> <li><a 
href="https://waset.org/conferences">Conferences</a></li> <li><a href="https://waset.org/conference-programs">Conference Program</a></li> <li><a href="https://waset.org/committees">Committees</a></li> <li><a href="https://publications.waset.org">Publications</a></li> </ul> </div> <div class="col-md-2"> <ul class="list-unstyled"> Research <li><a href="https://publications.waset.org/abstracts">Abstracts</a></li> <li><a href="https://publications.waset.org">Periodicals</a></li> <li><a href="https://publications.waset.org/archive">Archive</a></li> </ul> </div> <div class="col-md-2"> <ul class="list-unstyled"> Open Science <li><a target="_blank" rel="nofollow" href="https://publications.waset.org/static/files/Open-Science-Philosophy.pdf">Open Science Philosophy</a></li> <li><a target="_blank" rel="nofollow" href="https://publications.waset.org/static/files/Open-Science-Award.pdf">Open Science Award</a></li> <li><a target="_blank" rel="nofollow" href="https://publications.waset.org/static/files/Open-Society-Open-Science-and-Open-Innovation.pdf">Open Innovation</a></li> <li><a target="_blank" rel="nofollow" href="https://publications.waset.org/static/files/Postdoctoral-Fellowship-Award.pdf">Postdoctoral Fellowship Award</a></li> <li><a target="_blank" rel="nofollow" href="https://publications.waset.org/static/files/Scholarly-Research-Review.pdf">Scholarly Research Review</a></li> </ul> </div> <div class="col-md-2"> <ul class="list-unstyled"> Support <li><a href="https://waset.org/page/support">Support</a></li> <li><a href="https://waset.org/profile/messages/create">Contact Us</a></li> <li><a href="https://waset.org/profile/messages/create">Report Abuse</a></li> </ul> </div> </div> </div> </div> </div> <div class="container text-center"> <hr style="margin-top:0;margin-bottom:.3rem;"> <a href="https://creativecommons.org/licenses/by/4.0/" target="_blank" class="text-muted small">Creative Commons Attribution 4.0 International License</a> <div id="copy" class="mt-2">© 2025 
World Academy of Science, Engineering and Technology</div> </div> </footer> <a href="javascript:" id="return-to-top"><i class="fas fa-arrow-up"></i></a> <div class="modal" id="modal-template"> <div class="modal-dialog"> <div class="modal-content"> <div class="row m-0 mt-1"> <div class="col-md-12"> <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">×</span></button> </div> </div> <div class="modal-body"></div> </div> </div> </div> <script src="https://cdn.waset.org/static/plugins/jquery-3.3.1.min.js"></script> <script src="https://cdn.waset.org/static/plugins/bootstrap-4.2.1/js/bootstrap.bundle.min.js"></script> <script src="https://cdn.waset.org/static/js/site.js?v=150220211556"></script> <script> jQuery(document).ready(function() { /*jQuery.get("https://publications.waset.org/xhr/user-menu", function (response) { jQuery('#mainNavMenu').append(response); });*/ jQuery.get({ url: "https://publications.waset.org/xhr/user-menu", cache: false }).then(function(response){ jQuery('#mainNavMenu').append(response); }); }); </script> </body> </html>