CINXE.COM
Search results for: facial animation
<!DOCTYPE html> <html lang="en" dir="ltr"> <head> <!-- Google tag (gtag.js) --> <script async src="https://www.googletagmanager.com/gtag/js?id=G-P63WKM1TM1"></script> <script> window.dataLayer = window.dataLayer || []; function gtag(){dataLayer.push(arguments);} gtag('js', new Date()); gtag('config', 'G-P63WKM1TM1'); </script> <!-- Yandex.Metrika counter --> <script type="text/javascript" > (function(m,e,t,r,i,k,a){m[i]=m[i]||function(){(m[i].a=m[i].a||[]).push(arguments)}; m[i].l=1*new Date(); for (var j = 0; j < document.scripts.length; j++) {if (document.scripts[j].src === r) { return; }} k=e.createElement(t),a=e.getElementsByTagName(t)[0],k.async=1,k.src=r,a.parentNode.insertBefore(k,a)}) (window, document, "script", "https://mc.yandex.ru/metrika/tag.js", "ym"); ym(55165297, "init", { clickmap:false, trackLinks:true, accurateTrackBounce:true, webvisor:false }); </script> <noscript><div><img src="https://mc.yandex.ru/watch/55165297" style="position:absolute; left:-9999px;" alt="" /></div></noscript> <!-- /Yandex.Metrika counter --> <!-- Matomo --> <script> var _paq = window._paq = window._paq || []; /* tracker methods like "setCustomDimension" should be called before "trackPageView" */ _paq.push(['trackPageView']); _paq.push(['enableLinkTracking']); (function() { var u="//matomo.waset.org/"; _paq.push(['setTrackerUrl', u+'matomo.php']); _paq.push(['setSiteId', '2']); var d=document, g=d.createElement('script'), s=d.getElementsByTagName('script')[0]; g.async=true; g.src=u+'matomo.js'; s.parentNode.insertBefore(g,s); })(); </script> <!-- End Matomo Code --> <title>Search results for: facial animation</title> <meta name="description" content="Search results for: facial animation"> <meta name="keywords" content="facial animation"> <meta name="viewport" content="width=device-width, initial-scale=1, minimum-scale=1, maximum-scale=1, user-scalable=no"> <meta charset="utf-8"> <link href="https://cdn.waset.org/favicon.ico" type="image/x-icon" rel="shortcut icon"> <link href="https://cdn.waset.org/static/plugins/bootstrap-4.2.1/css/bootstrap.min.css" rel="stylesheet"> <link href="https://cdn.waset.org/static/plugins/fontawesome/css/all.min.css" rel="stylesheet"> <link href="https://cdn.waset.org/static/css/site.css?v=150220211555" rel="stylesheet"> </head> <body> <header> <div class="container"> <nav class="navbar navbar-expand-lg navbar-light"> <a class="navbar-brand" href="https://waset.org"> <img src="https://cdn.waset.org/static/images/wasetc.png" alt="Open Science Research Excellence" title="Open Science Research Excellence" /> </a> <button class="d-block d-lg-none navbar-toggler ml-auto" type="button" data-toggle="collapse" data-target="#navbarMenu" aria-controls="navbarMenu" aria-expanded="false" aria-label="Toggle navigation"> <span class="navbar-toggler-icon"></span> </button> <div class="w-100"> <div class="d-none d-lg-flex flex-row-reverse"> <form method="get" action="https://waset.org/search" class="form-inline my-2 my-lg-0"> <input class="form-control mr-sm-2" type="search" placeholder="Search Conferences" value="facial animation" name="q" aria-label="Search"> <button class="btn btn-light my-2 my-sm-0" type="submit"><i class="fas fa-search"></i></button> </form> </div> <div class="collapse navbar-collapse mt-1" id="navbarMenu"> <ul class="navbar-nav ml-auto align-items-center" id="mainNavMenu"> <li class="nav-item"> <a class="nav-link" href="https://waset.org/conferences" title="Conferences in 2025/2026/2027">Conferences</a> </li> <li class="nav-item"> <a class="nav-link" href="https://waset.org/disciplines" title="Disciplines">Disciplines</a> </li> <li class="nav-item"> <a class="nav-link" href="https://waset.org/committees" rel="nofollow">Committees</a> </li> <li class="nav-item dropdown"> <a class="nav-link dropdown-toggle" href="#" id="navbarDropdownPublications" role="button" data-toggle="dropdown" aria-haspopup="true" aria-expanded="false"> Publications </a> <div class="dropdown-menu" aria-labelledby="navbarDropdownPublications"> <a class="dropdown-item" href="https://publications.waset.org/abstracts">Abstracts</a> <a class="dropdown-item" href="https://publications.waset.org">Periodicals</a> <a class="dropdown-item" href="https://publications.waset.org/archive">Archive</a> </div> </li> <li class="nav-item"> <a class="nav-link" href="https://waset.org/page/support" title="Support">Support</a> </li> </ul> </div> </div> </nav> </div> </header> <main> <div class="container mt-4"> <div class="row"> <div class="col-md-9 mx-auto"> <form method="get" action="https://publications.waset.org/search"> <div id="custom-search-input"> <div class="input-group"> <i class="fas fa-search"></i> <input type="text" class="search-query" name="q" placeholder="Author, Title, Abstract, Keywords" value="facial animation"> <input type="submit" class="btn_search" value="Search"> </div> </div> </form> </div> </div> <div class="row mt-3"> <div class="col-sm-3"> <div class="card"> <div class="card-body"><strong>Commenced</strong> in January 2007</div> </div> </div> <div class="col-sm-3"> <div class="card"> <div class="card-body"><strong>Frequency:</strong> Monthly</div> </div> </div> <div class="col-sm-3"> <div class="card"> <div class="card-body"><strong>Edition:</strong> International</div> </div> </div> <div class="col-sm-3"> <div class="card"> <div class="card-body"><strong>Paper Count:</strong> 156</div> </div> </div> </div> <h1 class="mt-3 mb-3 text-center" style="font-size:1.6rem;">Search results for: facial animation</h1> <div class="card publication-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">156</span> Facial Expressions Animation and Lip Tracking Using Facial Characteristic Points and Deformable Model</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/search?q=Hadi%20Seyedarabi">Hadi Seyedarabi</a>, <a href="https://publications.waset.org/search?q=Ali%20Aghagolzadeh"> Ali Aghagolzadeh</a>, <a href="https://publications.waset.org/search?q=Sohrab%20Khanmohammadi"> Sohrab Khanmohammadi</a> </p> <p class="card-text"><strong>Abstract:</strong></p> Face and facial expressions play essential roles in interpersonal communication. Most of the current works on the facial expression recognition attempt to recognize a small set of the prototypic expressions such as happy, surprise, anger, sad, disgust and fear. However the most of the human emotions are communicated by changes in one or two of discrete features. In this paper, we develop a facial expressions synthesis system, based on the facial characteristic points (FCP's) tracking in the frontal image sequences. Selected FCP's are automatically tracked using a crosscorrelation based optical flow. The proposed synthesis system uses a simple deformable facial features model with a few set of control points that can be tracked in original facial image sequences. <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/search?q=Deformable%20face%20model" title="Deformable face model">Deformable face model</a>, <a href="https://publications.waset.org/search?q=facial%20animation" title=" facial animation"> facial animation</a>, <a href="https://publications.waset.org/search?q=facialcharacteristic%20points" title=" facialcharacteristic points"> facialcharacteristic points</a>, <a href="https://publications.waset.org/search?q=optical%20flow." title=" optical flow."> optical flow.</a> </p> <a href="https://publications.waset.org/14909/facial-expressions-animation-and-lip-tracking-using-facial-characteristic-points-and-deformable-model" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/14909/apa" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">APA</a> <a href="https://publications.waset.org/14909/bibtex" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">BibTeX</a> <a href="https://publications.waset.org/14909/chicago" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Chicago</a> <a href="https://publications.waset.org/14909/endnote" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">EndNote</a> <a href="https://publications.waset.org/14909/harvard" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Harvard</a> <a href="https://publications.waset.org/14909/json" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">JSON</a> <a href="https://publications.waset.org/14909/mla" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">MLA</a> <a href="https://publications.waset.org/14909/ris" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">RIS</a> <a href="https://publications.waset.org/14909/xml" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">XML</a> <a href="https://publications.waset.org/14909/iso690" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">ISO 690</a> <a href="https://publications.waset.org/14909.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">1642</span> </span> </div> </div> <div class="card publication-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">155</span> Further the Future: The Exploratory Study in 3D Animation Marketing Trend and Industry in Thailand</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/search?q=Pawit%20Mongkolprasit">Pawit Mongkolprasit</a>, <a href="https://publications.waset.org/search?q=Proud%20Arunrangsiwed"> Proud Arunrangsiwed</a> </p> <p class="card-text"><strong>Abstract:</strong></p> Lately, many media organizations in Thailand have started to produce 3D animation, so the quality of personnel should be identified. As an instructor in the school of Animation and Multimedia, the researchers have to prepare the students, suitable for the need of industry. The current study used exploratory research design to establish the knowledge of about this issue, including the required qualification of employees and the potential of animation industry in Thailand. The interview sessions involved three key informants from three well-known organizations. The interview data was used to design a questionnaire for the confirmation phase. The overall results showed that the industry needed an individual with 3D animation skill, computer graphic skills, good communication skills, a high responsibility, and an ability to finish the project on time. Moreover, it is also found that there were currently various kinds of media where 3D animation has been involved, such as films, TV variety, TV advertising, online advertising, and application on mobile device. <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/search?q=Animation" title="Animation">Animation</a>, <a href="https://publications.waset.org/search?q=marketing%20trend" title=" marketing trend"> marketing trend</a>, <a href="https://publications.waset.org/search?q=animation%20industry" title=" animation industry"> animation industry</a>, <a href="https://publications.waset.org/search?q=Thailand%20animation." title=" Thailand animation. "> Thailand animation. </a> </p> <a href="https://publications.waset.org/10004914/further-the-future-the-exploratory-study-in-3d-animation-marketing-trend-and-industry-in-thailand" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/10004914/apa" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">APA</a> <a href="https://publications.waset.org/10004914/bibtex" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">BibTeX</a> <a href="https://publications.waset.org/10004914/chicago" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Chicago</a> <a href="https://publications.waset.org/10004914/endnote" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">EndNote</a> <a href="https://publications.waset.org/10004914/harvard" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Harvard</a> <a href="https://publications.waset.org/10004914/json" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">JSON</a> <a href="https://publications.waset.org/10004914/mla" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">MLA</a> <a href="https://publications.waset.org/10004914/ris" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">RIS</a> <a href="https://publications.waset.org/10004914/xml" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">XML</a> <a href="https://publications.waset.org/10004914/iso690" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">ISO 690</a> <a href="https://publications.waset.org/10004914.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">1554</span> </span> </div> </div> <div class="card publication-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">154</span> Competitiveness of Animation Industry: The Case of Thailand</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/search?q=T.%20Niracharapa">T. Niracharapa</a> </p> <p class="card-text"><strong>Abstract:</strong></p> <p>The research studied and examined the competitiveness of the animation industry in Thailand. Data were collected based on articles, related reports and websites, news, research, and interviews of key persons from both public and private sectors. The diamond model was used to analyze the study. The major factor driving the Thai animation industry forward includes a quality workforce, their creativity and strong associations. However, discontinuity in government support, infrastructure, marketing, IP creation and financial constraints were factors keeping the Thai animation industry less competitive in the global market.</p> <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/search?q=Animation" title="Animation">Animation</a>, <a href="https://publications.waset.org/search?q=competitiveness" title=" competitiveness"> competitiveness</a>, <a href="https://publications.waset.org/search?q=digital%20content" title=" digital content"> digital content</a>, <a href="https://publications.waset.org/search?q=Thailand." title=" Thailand."> Thailand.</a> </p> <a href="https://publications.waset.org/9998873/competitiveness-of-animation-industry-the-case-of-thailand" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/9998873/apa" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">APA</a> <a href="https://publications.waset.org/9998873/bibtex" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">BibTeX</a> <a href="https://publications.waset.org/9998873/chicago" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Chicago</a> <a href="https://publications.waset.org/9998873/endnote" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">EndNote</a> <a href="https://publications.waset.org/9998873/harvard" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Harvard</a> <a href="https://publications.waset.org/9998873/json" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">JSON</a> <a href="https://publications.waset.org/9998873/mla" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">MLA</a> <a href="https://publications.waset.org/9998873/ris" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">RIS</a> <a href="https://publications.waset.org/9998873/xml" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">XML</a> <a href="https://publications.waset.org/9998873/iso690" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">ISO 690</a> <a href="https://publications.waset.org/9998873.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">4757</span> </span> </div> </div> <div class="card publication-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">153</span> A Talking Head System for Korean Text</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/search?q=Sang-Wan%20Kim">Sang-Wan Kim</a>, <a href="https://publications.waset.org/search?q=Hoon%20Lee"> Hoon Lee</a>, <a href="https://publications.waset.org/search?q=Kyung-Ho%20Choi"> Kyung-Ho Choi</a>, <a href="https://publications.waset.org/search?q=Soon-Young%20Park"> Soon-Young Park</a> </p> <p class="card-text"><strong>Abstract:</strong></p> A talking head system (THS) is presented to animate the face of a speaking 3D avatar in such a way that it realistically pronounces the given Korean text. The proposed system consists of SAPI compliant text-to-speech (TTS) engine and MPEG-4 compliant face animation generator. The input to the THS is a unicode text that is to be spoken with synchronized lip shape. The TTS engine generates a phoneme sequence with their duration and audio data. The TTS applies the coarticulation rules to the phoneme sequence and sends a mouth animation sequence to the face modeler. The proposed THS can make more natural lip sync and facial expression by using the face animation generator than those using the conventional visemes only. The experimental results show that our system has great potential for the implementation of talking head for Korean text. <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/search?q=Talking%20head" title="Talking head">Talking head</a>, <a href="https://publications.waset.org/search?q=Lip%20sync" title=" Lip sync"> Lip sync</a>, <a href="https://publications.waset.org/search?q=TTS" title=" TTS"> TTS</a>, <a href="https://publications.waset.org/search?q=MPEG4." title=" MPEG4."> MPEG4.</a> </p> <a href="https://publications.waset.org/4410/a-talking-head-system-for-korean-text" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/4410/apa" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">APA</a> <a href="https://publications.waset.org/4410/bibtex" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">BibTeX</a> <a href="https://publications.waset.org/4410/chicago" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Chicago</a> <a href="https://publications.waset.org/4410/endnote" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">EndNote</a> <a href="https://publications.waset.org/4410/harvard" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Harvard</a> <a href="https://publications.waset.org/4410/json" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">JSON</a> <a href="https://publications.waset.org/4410/mla" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">MLA</a> <a href="https://publications.waset.org/4410/ris" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">RIS</a> <a href="https://publications.waset.org/4410/xml" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">XML</a> <a href="https://publications.waset.org/4410/iso690" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">ISO 690</a> <a href="https://publications.waset.org/4410.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">1505</span> </span> </div> </div> <div class="card publication-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">152</span> A Survey on Facial Feature Points Detection Techniques and Approaches</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/search?q=Rachid%20Ahdid">Rachid Ahdid</a>, <a href="https://publications.waset.org/search?q=Khaddouj%20Taifi"> Khaddouj Taifi</a>, <a href="https://publications.waset.org/search?q=Said%20Safi"> Said Safi</a>, <a href="https://publications.waset.org/search?q=Bouzid%20Manaut"> Bouzid Manaut</a> </p> <p class="card-text"><strong>Abstract:</strong></p> Automatic detection of facial feature points plays an important role in applications such as facial feature tracking, human-machine interaction and face recognition. The majority of facial feature points detection methods using two-dimensional or three-dimensional data are covered in existing survey papers. In this article chosen approaches to the facial features detection have been gathered and described. This overview focuses on the class of researches exploiting facial feature points detection to represent facial surface for two-dimensional or three-dimensional face. In the conclusion, we discusses advantages and disadvantages of the presented algorithms. <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/search?q=Facial%20feature%20points" title="Facial feature points">Facial feature points</a>, <a href="https://publications.waset.org/search?q=face%20recognition" title=" face recognition"> face recognition</a>, <a href="https://publications.waset.org/search?q=facial%20feature%0D%0Atracking" title=" facial feature tracking"> facial feature tracking</a>, <a href="https://publications.waset.org/search?q=two-dimensional%20data" title=" two-dimensional data"> two-dimensional data</a>, <a href="https://publications.waset.org/search?q=three-dimensional%20data." title=" three-dimensional data."> three-dimensional data.</a> </p> <a href="https://publications.waset.org/10005826/a-survey-on-facial-feature-points-detection-techniques-and-approaches" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/10005826/apa" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">APA</a> <a href="https://publications.waset.org/10005826/bibtex" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">BibTeX</a> <a href="https://publications.waset.org/10005826/chicago" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Chicago</a> <a href="https://publications.waset.org/10005826/endnote" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">EndNote</a> <a href="https://publications.waset.org/10005826/harvard" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Harvard</a> <a href="https://publications.waset.org/10005826/json" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">JSON</a> <a href="https://publications.waset.org/10005826/mla" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">MLA</a> <a href="https://publications.waset.org/10005826/ris" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">RIS</a> <a href="https://publications.waset.org/10005826/xml" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">XML</a> <a href="https://publications.waset.org/10005826/iso690" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">ISO 690</a> <a href="https://publications.waset.org/10005826.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">1693</span> </span> </div> </div> <div class="card publication-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">151</span> An Efficient 3D Animation Data Reduction Using Frame Removal</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/search?q=Jinsuk%20Yang">Jinsuk Yang</a>, <a href="https://publications.waset.org/search?q=Choongjae%20Joo"> Choongjae Joo</a>, <a href="https://publications.waset.org/search?q=Kyoungsu%20Oh"> Kyoungsu Oh</a> </p> <p class="card-text"><strong>Abstract:</strong></p> <p>Existing methods in which the animation data of all frames are stored and reproduced as with vertex animation cannot be used in mobile device environments because these methods use large amounts of the memory. So 3D animation data reduction methods aimed at solving this problem have been extensively studied thus far and we propose a new method as follows. First, we find and remove frames in which motion changes are small out of all animation frames and store only the animation data of remaining frames (involving large motion changes). When playing the animation, the removed frame areas are reconstructed using the interpolation of the remaining frames. Our key contribution is to calculate the accelerations of the joints of individual frames and the standard deviations of the accelerations using the information of joint locations in the relevant 3D model in order to find and delete frames in which motion changes are small. Our methods can reduce data sizes by approximately 50% or more while providing quality which is not much lower compared to original animations. Therefore, our method is expected to be usefully used in mobile device environments or other environments in which memory sizes are limited.</p> <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/search?q=Data%20Reduction" title="Data Reduction">Data Reduction</a>, <a href="https://publications.waset.org/search?q=Interpolation" title=" Interpolation"> Interpolation</a>, <a href="https://publications.waset.org/search?q=Vertex%20Animation" title=" Vertex Animation"> Vertex Animation</a>, <a href="https://publications.waset.org/search?q=3D%0D%0AAnimation." title=" 3D Animation."> 3D Animation.</a> </p> <a href="https://publications.waset.org/5894/an-efficient-3d-animation-data-reduction-using-frame-removal" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/5894/apa" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">APA</a> <a href="https://publications.waset.org/5894/bibtex" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">BibTeX</a> <a href="https://publications.waset.org/5894/chicago" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Chicago</a> <a href="https://publications.waset.org/5894/endnote" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">EndNote</a> <a href="https://publications.waset.org/5894/harvard" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Harvard</a> <a href="https://publications.waset.org/5894/json" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">JSON</a> <a href="https://publications.waset.org/5894/mla" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">MLA</a> <a href="https://publications.waset.org/5894/ris" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">RIS</a> <a href="https://publications.waset.org/5894/xml" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">XML</a> <a href="https://publications.waset.org/5894/iso690" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">ISO 690</a> <a href="https://publications.waset.org/5894.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">1678</span> </span> </div> </div> <div class="card publication-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">150</span> Educational and Technological Perspectives in Doraemon - Hope and Dreams in Doraemon鈥檚 Gadgets</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/search?q=Miho%20Tsukamoto">Miho Tsukamoto</a> </p> <p class="card-text"><strong>Abstract:</strong></p> A Japanese manga character, Doraemon, was made by Fujiko F. Fujio in 1969, was made into animation in 1973. The main character, Doraemon, is a robot cat, and is a well-known Japanese animated character. However, Doraemon is not only regarded as an animation character but it is also used in educational and technological programs in Japan. This paper focuses on the background of Doraemon, educational and technological perspectives on Doraemon, and comparison of the original Japanese animation and the US remade version, and the animator Fujiko鈥檚 dreams and hopes for Doraemon will be examined. Since Doraemon has been exported as animation and manga to overseas, perspectives toward Doraemon have changed. For example, changes of stories and characters can been seen in the present Doraemon animation. Not only the overseas TV productions which broadcast Doraemon but also the Japanese production has to consider violence, sexuality, etc. when editing episodes. Because of representation of cultural differences, Japanese animation is thought to contain more violence, discrimination, and sexuality in animation. With responses from overseas, the Japanese production was cautious about the US remade version. They cared about the US Broadcast Standard, and tried to consider US customs and culture in the US remade version. Seeing the difference, acculturation is necessary for exports of animation overseas. Moreover, observing different aspects of Doraemon domestically, Doraemon provides dreams and hopes to children. <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/search?q=Animation" title="Animation">Animation</a>, <a href="https://publications.waset.org/search?q=Change" title=" Change"> Change</a>, <a href="https://publications.waset.org/search?q=Doraemon" title=" Doraemon"> Doraemon</a>, <a href="https://publications.waset.org/search?q=Gadgets" title=" Gadgets"> Gadgets</a>, <a href="https://publications.waset.org/search?q=Manga" title=" Manga"> Manga</a>, <a href="https://publications.waset.org/search?q=Technology." title=" Technology."> Technology.</a> </p> <a href="https://publications.waset.org/10001577/educational-and-technological-perspectives-in-doraemon-hope-and-dreams-in-doraemons-gadgets" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/10001577/apa" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">APA</a> <a href="https://publications.waset.org/10001577/bibtex" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">BibTeX</a> <a href="https://publications.waset.org/10001577/chicago" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Chicago</a> <a href="https://publications.waset.org/10001577/endnote" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">EndNote</a> <a href="https://publications.waset.org/10001577/harvard" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Harvard</a> <a href="https://publications.waset.org/10001577/json" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">JSON</a> <a href="https://publications.waset.org/10001577/mla" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">MLA</a> <a href="https://publications.waset.org/10001577/ris" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">RIS</a> <a href="https://publications.waset.org/10001577/xml" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">XML</a> <a href="https://publications.waset.org/10001577/iso690" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">ISO 690</a> <a href="https://publications.waset.org/10001577.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">5598</span> </span> </div> </div> <div class="card publication-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">149</span> Temporally Coherent 3D Animation Reconstruction from RGB-D Video Data</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/search?q=Salam%20Khalifa">Salam Khalifa</a>, <a href="https://publications.waset.org/search?q=Naveed%20Ahmed"> Naveed Ahmed</a> </p> <p class="card-text"><strong>Abstract:</strong></p> <p>We present a new method to reconstruct a temporally coherent 3D animation from single or multi-view RGB-D video data using unbiased feature point sampling. Given RGB-D video data, in form of a 3D point cloud sequence, our method first extracts feature points using both color and depth information. In the subsequent steps, these feature points are used to match two 3D point clouds in consecutive frames independent of their resolution. Our new motion vectors based dynamic alignement method then fully reconstruct a spatio-temporally coherent 3D animation. We perform extensive quantitative validation using novel error functions to analyze the results. We show that despite the limiting factors of temporal and spatial noise associated to RGB-D data, it is possible to extract temporal coherence to faithfully reconstruct a temporally coherent 3D animation from RGB-D video data.</p> <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/search?q=3D%20video" title="3D video">3D video</a>, <a href="https://publications.waset.org/search?q=3D%20animation" title=" 3D animation"> 3D animation</a>, <a href="https://publications.waset.org/search?q=RGB-D%20video" title=" RGB-D video"> RGB-D video</a>, <a href="https://publications.waset.org/search?q=Temporally%0D%0ACoherent%203D%20Animation." title=" Temporally Coherent 3D Animation."> Temporally Coherent 3D Animation.</a> </p> <a href="https://publications.waset.org/9999406/temporally-coherent-3d-animation-reconstruction-from-rgb-d-video-data" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/9999406/apa" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">APA</a> <a href="https://publications.waset.org/9999406/bibtex" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">BibTeX</a> <a href="https://publications.waset.org/9999406/chicago" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Chicago</a> <a href="https://publications.waset.org/9999406/endnote" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">EndNote</a> <a href="https://publications.waset.org/9999406/harvard" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Harvard</a> <a href="https://publications.waset.org/9999406/json" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">JSON</a> <a href="https://publications.waset.org/9999406/mla" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">MLA</a> <a href="https://publications.waset.org/9999406/ris" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">RIS</a> <a href="https://publications.waset.org/9999406/xml" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">XML</a> <a href="https://publications.waset.org/9999406/iso690" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">ISO 690</a> <a href="https://publications.waset.org/9999406.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">2085</span> </span> </div> </div> <div class="card publication-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">148</span> 3D Definition for Human Smiles</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/search?q=Shyue-Ran%20Li">Shyue-Ran Li</a>, <a href="https://publications.waset.org/search?q=Kuohsiang%20Chen"> Kuohsiang Chen</a> </p> <p class="card-text"><strong>Abstract:</strong></p> The study explored varied types of human smiles and extracted most of the key factors affecting the smiles. These key factors then were converted into a set of control points which could serve to satisfy the needs for creation of facial expression for 3D animators and be further applied to the face simulation for robots in the future. First, hundreds of human smile pictures were collected and analyzed to identify the key factors for face expression. Then, the factors were converted into a set of control points and sizing parameters calculated proportionally. Finally, two different faces were constructed for validating the parameters via the process of simulating smiles of the same type as the original one. <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/search?q=3D%20animation" title="3D animation">3D animation</a>, <a href="https://publications.waset.org/search?q=facial%20expression" title=" facial expression"> facial expression</a>, <a href="https://publications.waset.org/search?q=numerical" title=" numerical"> numerical</a>, <a href="https://publications.waset.org/search?q=robot" title=" robot"> robot</a>, <a href="https://publications.waset.org/search?q=smile%20parameter." title="smile parameter.">smile parameter.</a> </p> <a href="https://publications.waset.org/209/3d-definition-for-human-smiles" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/209/apa" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">APA</a> <a href="https://publications.waset.org/209/bibtex" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">BibTeX</a> <a href="https://publications.waset.org/209/chicago" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Chicago</a> <a href="https://publications.waset.org/209/endnote" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">EndNote</a> <a href="https://publications.waset.org/209/harvard" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Harvard</a> <a href="https://publications.waset.org/209/json" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">JSON</a> <a href="https://publications.waset.org/209/mla" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">MLA</a> <a href="https://publications.waset.org/209/ris" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">RIS</a> <a href="https://publications.waset.org/209/xml" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">XML</a> <a href="https://publications.waset.org/209/iso690" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">ISO 690</a> <a href="https://publications.waset.org/209.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">1518</span> </span> </div> </div> <div class="card publication-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">147</span> Automatic Facial Skin Segmentation Using Possibilistic C-Means Algorithm for Evaluation of Facial Surgeries</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/search?q=Elham%20Alaee">Elham Alaee</a>, <a href="https://publications.waset.org/search?q=Mousa%20Shamsi"> Mousa Shamsi</a>, <a href="https://publications.waset.org/search?q=Hossein%20Ahmadi"> Hossein Ahmadi</a>, <a href="https://publications.waset.org/search?q=Soroosh%20Nazem"> Soroosh Nazem</a>, <a href="https://publications.waset.org/search?q=Mohammadhossein%20Sedaaghi"> Mohammadhossein Sedaaghi</a> </p> <p class="card-text"><strong>Abstract:</strong></p> <p>Human face has a fundamental role in the appearance of individuals. So the importance of facial surgeries is undeniable. Thus, there is a need for the appropriate and accurate facial skin segmentation in order to extract different features. Since Fuzzy CMeans (FCM) clustering algorithm doesn’t work appropriately for noisy images and outliers, in this paper we exploit Possibilistic CMeans (PCM) algorithm in order to segment the facial skin. For this purpose, first, we convert facial images from RGB to YCbCr color space. To evaluate performance of the proposed algorithm, the database of Sahand University of Technology, Tabriz, Iran was used. In order to have a better understanding from the proposed algorithm; FCM and Expectation-Maximization (EM) algorithms are also used for facial skin segmentation. The proposed method shows better results than the other segmentation methods. Results include misclassification error (0.032) and the region’s area error (0.045) for the proposed algorithm.</p> <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/search?q=Facial%20image" title="Facial image">Facial image</a>, <a href="https://publications.waset.org/search?q=segmentation" title=" segmentation"> segmentation</a>, <a href="https://publications.waset.org/search?q=PCM" title=" PCM"> PCM</a>, <a href="https://publications.waset.org/search?q=FCM" title=" FCM"> FCM</a>, <a href="https://publications.waset.org/search?q=skin%20error" title=" skin error"> skin error</a>, <a href="https://publications.waset.org/search?q=facial%20surgery." title=" facial surgery."> facial surgery.</a> </p> <a href="https://publications.waset.org/9998526/automatic-facial-skin-segmentation-using-possibilistic-c-means-algorithm-for-evaluation-of-facial-surgeries" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/9998526/apa" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">APA</a> <a href="https://publications.waset.org/9998526/bibtex" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">BibTeX</a> <a href="https://publications.waset.org/9998526/chicago" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Chicago</a> <a href="https://publications.waset.org/9998526/endnote" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">EndNote</a> <a href="https://publications.waset.org/9998526/harvard" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Harvard</a> <a href="https://publications.waset.org/9998526/json" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">JSON</a> <a href="https://publications.waset.org/9998526/mla" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">MLA</a> <a href="https://publications.waset.org/9998526/ris" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">RIS</a> <a href="https://publications.waset.org/9998526/xml" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">XML</a> <a href="https://publications.waset.org/9998526/iso690" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">ISO 690</a> <a href="https://publications.waset.org/9998526.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">2008</span> </span> </div> </div> <div class="card publication-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">146</span> Implementation of Sprite Animation for Multimedia Application</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/search?q=Ms.%20Yi%20Mon%20Thant">Ms. Yi Mon Thant</a> </p> <p class="card-text"><strong>Abstract:</strong></p> Animation is simply defined as the sequencing of a series of static images to generate the illusion of movement. Most people believe that actual drawings or creation of the individual images is the animation, when in actuality it is the arrangement of those static images that conveys the motion. To become an animator, it is often assumed that needed the ability to quickly design masterpiece after masterpiece. Although some semblance of artistic skill is a necessity for the job, the real key to becoming a great animator is in the comprehension of timing. This paper will use a combination of sprite animation, frame animation, and some other techniques to cause a group of multi-colored static images to slither around in the bounded area. In addition to slithering, the images will also change the color of different parts of their body, much like the real world creatures that have this amazing ability to change the colors on their bodies do. This paper was implemented by using Java 2 Standard Edition (J2SE). It is both time-consuming and expensive to create animations, regardless if they are created by hand or by using motion-capture equipment. If the animators could reuse old animations and even blend different animations together, a lot of work would be saved in the process. The main objective of this paper is to examine a method for blending several animations together in real time. This paper presents and analyses a solution using Weighted Skeleton Animation (WSA) resulting in limited CPU time and memory waste as well as saving time for the animators. The idea presented is described in detail and implemented. In this paper, text animation, vertex animation, sprite part animation and whole sprite animation were tested. In this research paper, the resolution, smoothness and movement of animated images will be carried out from the parameters, which will be obtained from the experimental research of implementing this paper. <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/search?q=Weighted%20Skeleton%20Animation" title="Weighted Skeleton Animation">Weighted Skeleton Animation</a> </p> <a href="https://publications.waset.org/2197/implementation-of-sprite-animation-for-multimedia-application" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/2197/apa" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">APA</a> <a href="https://publications.waset.org/2197/bibtex" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">BibTeX</a> <a href="https://publications.waset.org/2197/chicago" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Chicago</a> <a href="https://publications.waset.org/2197/endnote" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">EndNote</a> <a href="https://publications.waset.org/2197/harvard" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Harvard</a> <a href="https://publications.waset.org/2197/json" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">JSON</a> <a href="https://publications.waset.org/2197/mla" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">MLA</a> <a href="https://publications.waset.org/2197/ris" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">RIS</a> <a href="https://publications.waset.org/2197/xml" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">XML</a> <a href="https://publications.waset.org/2197/iso690" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">ISO 690</a> <a href="https://publications.waset.org/2197.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">1853</span> </span> </div> </div> <div class="card publication-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">145</span> Fast Facial Feature Extraction and Matching with Artificial Face Models</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/search?q=Y.%20H.%20Tsai">Y. H. Tsai</a>, <a href="https://publications.waset.org/search?q=Y.%20W.%20Chen"> Y. W. Chen</a> </p> <p class="card-text"><strong>Abstract:</strong></p> Facial features are frequently used to represent local properties of a human face image in computer vision applications. In this paper, we present a fast algorithm that can extract the facial features online such that they can give a satisfying representation of a face image. It includes one step for a coarse detection of each facial feature by AdaBoost and another one to increase the accuracy of the found points by Active Shape Models (ASM) in the regions of interest. The resulted facial features are evaluated by matching with artificial face models in the applications of physiognomy. The distance measure between the features and those in the fate models from the database is carried out by means of the Hausdorff distance. In the experiment, the proposed method shows the efficient performance in facial feature extractions and online system of physiognomy. <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/search?q=Facial%20feature%20extraction" title="Facial feature extraction">Facial feature extraction</a>, <a href="https://publications.waset.org/search?q=AdaBoost" title=" AdaBoost"> AdaBoost</a>, <a href="https://publications.waset.org/search?q=Active%20shapemodel" title=" Active shapemodel"> Active shapemodel</a>, <a href="https://publications.waset.org/search?q=Hausdorff%20distance" title=" Hausdorff distance"> Hausdorff distance</a> </p> <a href="https://publications.waset.org/5776/fast-facial-feature-extraction-and-matching-with-artificial-face-models" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/5776/apa" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">APA</a> <a href="https://publications.waset.org/5776/bibtex" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">BibTeX</a> <a href="https://publications.waset.org/5776/chicago" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Chicago</a> <a href="https://publications.waset.org/5776/endnote" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">EndNote</a> <a href="https://publications.waset.org/5776/harvard" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Harvard</a> <a href="https://publications.waset.org/5776/json" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">JSON</a> <a href="https://publications.waset.org/5776/mla" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">MLA</a> <a href="https://publications.waset.org/5776/ris" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">RIS</a> <a href="https://publications.waset.org/5776/xml" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">XML</a> <a href="https://publications.waset.org/5776/iso690" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">ISO 690</a> <a href="https://publications.waset.org/5776.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">1827</span> </span> </div> </div> <div class="card publication-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">144</span> Facial Expression Phoenix (FePh): An Annotated Sequenced Dataset for Facial and Emotion-Specified Expressions in Sign Language</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/search?q=Marie%20Alaghband">Marie Alaghband</a>, <a href="https://publications.waset.org/search?q=Niloofar%20Yousefi"> Niloofar Yousefi</a>, <a href="https://publications.waset.org/search?q=Ivan%20Garibay"> Ivan Garibay</a> </p> <p class="card-text"><strong>Abstract:</strong></p> Facial expressions are important parts of both gesture and sign language recognition systems. Despite the recent advances in both fields, annotated facial expression datasets in the context of sign language are still scarce resources. In this manuscript, we introduce an annotated sequenced facial expression dataset in the context of sign language, comprising over 3000 facial images extracted from the daily news and weather forecast of the public tv-station PHOENIX. Unlike the majority of currently existing facial expression datasets, FePh provides sequenced semi-blurry facial images with different head poses, orientations, and movements. In addition, in the majority of images, identities are mouthing the words, which makes the data more challenging. To annotate this dataset we consider primary, secondary, and tertiary dyads of seven basic emotions of "sad", "surprise", "fear", "angry", "neutral", "disgust", and "happy". We also considered the "None" class if the image’s facial expression could not be described by any of the aforementioned emotions. Although we provide FePh as a facial expression dataset of signers in sign language, it has a wider application in gesture recognition and Human Computer Interaction (HCI) systems. <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/search?q=Annotated%20Facial%20Expression%20Dataset" title="Annotated Facial Expression Dataset">Annotated Facial Expression Dataset</a>, <a href="https://publications.waset.org/search?q=Sign%20Language%0D%0ARecognition" title=" Sign Language Recognition"> Sign Language Recognition</a>, <a href="https://publications.waset.org/search?q=Gesture%20Recognition" title=" Gesture Recognition"> Gesture Recognition</a>, <a href="https://publications.waset.org/search?q=Sequenced%20Facial%20Expression%0D%0ADataset." title=" Sequenced Facial Expression Dataset."> Sequenced Facial Expression Dataset.</a> </p> <a href="https://publications.waset.org/10011933/facial-expression-phoenix-feph-an-annotated-sequenced-dataset-for-facial-and-emotion-specified-expressions-in-sign-language" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/10011933/apa" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">APA</a> <a href="https://publications.waset.org/10011933/bibtex" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">BibTeX</a> <a href="https://publications.waset.org/10011933/chicago" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Chicago</a> <a href="https://publications.waset.org/10011933/endnote" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">EndNote</a> <a href="https://publications.waset.org/10011933/harvard" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Harvard</a> <a href="https://publications.waset.org/10011933/json" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">JSON</a> <a href="https://publications.waset.org/10011933/mla" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">MLA</a> <a href="https://publications.waset.org/10011933/ris" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">RIS</a> <a href="https://publications.waset.org/10011933/xml" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">XML</a> <a href="https://publications.waset.org/10011933/iso690" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">ISO 690</a> <a href="https://publications.waset.org/10011933.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">746</span> </span> </div> </div> <div class="card publication-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">143</span> An Efficient Algorithm for Motion Detection Based Facial Expression Recognition using Optical Flow</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/search?q=Ahmad%20R.%20Naghsh-Nilchi">Ahmad R. Naghsh-Nilchi</a>, <a href="https://publications.waset.org/search?q=Mohammad%20Roshanzamir"> Mohammad Roshanzamir</a> </p> <p class="card-text"><strong>Abstract:</strong></p> One of the popular methods for recognition of facial expressions such as happiness, sadness and surprise is based on deformation of facial features. Motion vectors which show these deformations can be specified by the optical flow. In this method, for detecting emotions, the resulted set of motion vectors are compared with standard deformation template that caused by facial expressions. In this paper, a new method is introduced to compute the quantity of likeness in order to make decision based on the importance of obtained vectors from an optical flow approach. For finding the vectors, one of the efficient optical flow method developed by Gautama and VanHulle[17] is used. The suggested method has been examined over Cohn-Kanade AU-Coded Facial Expression Database, one of the most comprehensive collections of test images available. The experimental results show that our method could correctly recognize the facial expressions in 94% of case studies. The results also show that only a few number of image frames (three frames) are sufficient to detect facial expressions with rate of success of about 83.3%. This is a significant improvement over the available methods. <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/search?q=Facial%20expression" title="Facial expression">Facial expression</a>, <a href="https://publications.waset.org/search?q=Facial%20features" title=" Facial features"> Facial features</a>, <a href="https://publications.waset.org/search?q=Optical%20flow" title=" Optical flow"> Optical flow</a>, <a href="https://publications.waset.org/search?q=Motion%20vectors." title="Motion vectors.">Motion vectors.</a> </p> <a href="https://publications.waset.org/6512/an-efficient-algorithm-for-motion-detection-based-facial-expression-recognition-using-optical-flow" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/6512/apa" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">APA</a> <a href="https://publications.waset.org/6512/bibtex" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">BibTeX</a> <a href="https://publications.waset.org/6512/chicago" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Chicago</a> <a href="https://publications.waset.org/6512/endnote" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">EndNote</a> <a href="https://publications.waset.org/6512/harvard" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Harvard</a> <a href="https://publications.waset.org/6512/json" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">JSON</a> <a href="https://publications.waset.org/6512/mla" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">MLA</a> <a href="https://publications.waset.org/6512/ris" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">RIS</a> <a href="https://publications.waset.org/6512/xml" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">XML</a> <a href="https://publications.waset.org/6512/iso690" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">ISO 690</a> <a href="https://publications.waset.org/6512.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">2387</span> </span> </div> </div> <div class="card publication-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">142</span> Human Facial Expression Recognition using MANFIS Model</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/search?q=V.%20Gomathi">V. Gomathi</a>, <a href="https://publications.waset.org/search?q=Dr.%20K.%20Ramar"> Dr. K. Ramar</a>, <a href="https://publications.waset.org/search?q=A.%20Santhiyaku%20Jeevakumar"> A. Santhiyaku Jeevakumar</a> </p> <p class="card-text"><strong>Abstract:</strong></p> Facial expression analysis plays a significant role for human computer interaction. Automatic analysis of human facial expression is still a challenging problem with many applications. In this paper, we propose neuro-fuzzy based automatic facial expression recognition system to recognize the human facial expressions like happy, fear, sad, angry, disgust and surprise. Initially facial image is segmented into three regions from which the uniform Local Binary Pattern (LBP) texture features distributions are extracted and represented as a histogram descriptor. The facial expressions are recognized using Multiple Adaptive Neuro Fuzzy Inference System (MANFIS). The proposed system designed and tested with JAFFE face database. The proposed model reports 94.29% of classification accuracy. <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/search?q=Adaptive%20neuro-fuzzy%20inference%20system" title="Adaptive neuro-fuzzy inference system">Adaptive neuro-fuzzy inference system</a>, <a href="https://publications.waset.org/search?q=Facialexpression" title=" Facialexpression"> Facialexpression</a>, <a href="https://publications.waset.org/search?q=Local%20binary%20pattern" title=" Local binary pattern"> Local binary pattern</a>, <a href="https://publications.waset.org/search?q=Uniform%20Histogram" title=" Uniform Histogram"> Uniform Histogram</a> </p> <a href="https://publications.waset.org/13988/human-facial-expression-recognition-using-manfis-model" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/13988/apa" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">APA</a> <a href="https://publications.waset.org/13988/bibtex" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">BibTeX</a> <a href="https://publications.waset.org/13988/chicago" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Chicago</a> <a href="https://publications.waset.org/13988/endnote" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">EndNote</a> <a href="https://publications.waset.org/13988/harvard" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Harvard</a> <a href="https://publications.waset.org/13988/json" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">JSON</a> <a href="https://publications.waset.org/13988/mla" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">MLA</a> <a href="https://publications.waset.org/13988/ris" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">RIS</a> <a href="https://publications.waset.org/13988/xml" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">XML</a> <a href="https://publications.waset.org/13988/iso690" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">ISO 690</a> <a href="https://publications.waset.org/13988.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">2110</span> </span> </div> </div> <div class="card publication-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">141</span> The Effect of Facial Expressions on Students in Virtual Educational Environments</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/search?q=G.%20Theonas">G. Theonas</a>, <a href="https://publications.waset.org/search?q=D.%20Hobbs"> D. Hobbs</a>, <a href="https://publications.waset.org/search?q=D.%20Rigas"> D. Rigas</a> </p> <p class="card-text"><strong>Abstract:</strong></p> <p>The scope of this research was to study the relation between the facial expressions of three lecturers in a real academic lecture theatre and the reactions of the students to those expressions. The first experiment aimed to investigate the effectiveness of a virtual lecturer-s expressions on the students- learning outcome in a virtual pedagogical environment. The second experiment studied the effectiveness of a single facial expression, i.e. the smile, on the students- performance. Both experiments involved virtual lectures, with virtual lecturers teaching real students. The results suggest that the students performed better by 86%, in the lectures where the lecturer performed facial expressions compared to the results of the lectures that did not use facial expressions. However, when simple or basic information was used, the facial expressions of the virtual lecturer had no substantial effect on the students- learning outcome. Finally, the appropriate use of smiles increased the interest of the students and consequently their performance.</p> <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/search?q=emotion" title="emotion">emotion</a>, <a href="https://publications.waset.org/search?q=facial%20expression" title=" facial expression"> facial expression</a>, <a href="https://publications.waset.org/search?q=smile" title=" smile"> smile</a>, <a href="https://publications.waset.org/search?q=virtual%20educational%20environment" title=" virtual educational environment"> virtual educational environment</a>, <a href="https://publications.waset.org/search?q=virtual%20learning" title=" virtual learning"> virtual learning</a>, <a href="https://publications.waset.org/search?q=virtual%20lecturer." title=" virtual lecturer."> virtual lecturer.</a> </p> <a href="https://publications.waset.org/3623/the-effect-of-facial-expressions-on-students-in-virtual-educational-environments" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/3623/apa" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">APA</a> <a href="https://publications.waset.org/3623/bibtex" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">BibTeX</a> <a href="https://publications.waset.org/3623/chicago" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Chicago</a> <a href="https://publications.waset.org/3623/endnote" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">EndNote</a> <a href="https://publications.waset.org/3623/harvard" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Harvard</a> <a href="https://publications.waset.org/3623/json" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">JSON</a> <a href="https://publications.waset.org/3623/mla" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">MLA</a> <a href="https://publications.waset.org/3623/ris" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">RIS</a> <a href="https://publications.waset.org/3623/xml" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">XML</a> <a href="https://publications.waset.org/3623/iso690" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">ISO 690</a> <a href="https://publications.waset.org/3623.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">2004</span> </span> </div> </div> <div class="card publication-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">140</span> Comparing Emotion Recognition from Voice and Facial Data Using Time Invariant Features</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/search?q=Vesna%20Kirandziska">Vesna Kirandziska</a>, <a href="https://publications.waset.org/search?q=Nevena%20Ackovska"> Nevena Ackovska</a>, <a href="https://publications.waset.org/search?q=Ana%20Madevska%20Bogdanova"> Ana Madevska Bogdanova</a> </p> <p class="card-text"><strong>Abstract:</strong></p> <p>The problem of emotion recognition is a challenging problem. It is still an open problem from the aspect of both intelligent systems and psychology. In this paper, both voice features and facial features are used for building an emotion recognition system. A Support Vector Machine classifiers are built by using raw data from video recordings. In this paper, the results obtained for the emotion recognition are given, and a discussion about the validity and the expressiveness of different emotions is presented. A comparison between the classifiers build from facial data only, voice data only and from the combination of both data is made here. The need for a better combination of the information from facial expression and voice data is argued.</p> <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/search?q=Emotion%20recognition" title="Emotion recognition">Emotion recognition</a>, <a href="https://publications.waset.org/search?q=facial%20recognition" title=" facial recognition"> facial recognition</a>, <a href="https://publications.waset.org/search?q=signal%20processing" title=" signal processing"> signal processing</a>, <a href="https://publications.waset.org/search?q=machine%20learning." title=" machine learning. "> machine learning. </a> </p> <a href="https://publications.waset.org/10004221/comparing-emotion-recognition-from-voice-and-facial-data-using-time-invariant-features" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/10004221/apa" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">APA</a> <a href="https://publications.waset.org/10004221/bibtex" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">BibTeX</a> <a href="https://publications.waset.org/10004221/chicago" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Chicago</a> <a href="https://publications.waset.org/10004221/endnote" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">EndNote</a> <a href="https://publications.waset.org/10004221/harvard" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Harvard</a> <a href="https://publications.waset.org/10004221/json" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">JSON</a> <a href="https://publications.waset.org/10004221/mla" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">MLA</a> <a href="https://publications.waset.org/10004221/ris" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">RIS</a> <a href="https://publications.waset.org/10004221/xml" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">XML</a> <a href="https://publications.waset.org/10004221/iso690" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">ISO 690</a> <a href="https://publications.waset.org/10004221.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">2042</span> </span> </div> </div> <div class="card publication-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">139</span> Improving the Performance of Deep Learning in Facial Emotion Recognition with Image Sharpening</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/search?q=Ksheeraj%20Sai%20Vepuri">Ksheeraj Sai Vepuri</a>, <a href="https://publications.waset.org/search?q=Nada%20Attar"> Nada Attar</a> </p> <p class="card-text"><strong>Abstract:</strong></p> We as humans use words with accompanying visual and facial cues to communicate effectively. Classifying facial emotion using computer vision methodologies has been an active research area in the computer vision field. In this paper, we propose a simple method for facial expression recognition that enhances accuracy. We tested our method on the FER-2013 dataset that contains static images. Instead of using Histogram equalization to preprocess the dataset, we used Unsharp Mask to emphasize texture and details and sharpened the edges. We also used ImageDataGenerator from Keras library for data augmentation. Then we used Convolutional Neural Networks (CNN) model to classify the images into 7 different facial expressions, yielding an accuracy of 69.46% on the test set. Our results show that using image preprocessing such as the sharpening technique for a CNN model can improve the performance, even when the CNN model is relatively simple. <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/search?q=Facial%20expression%20recognition" title="Facial expression recognition">Facial expression recognition</a>, <a href="https://publications.waset.org/search?q=image%20pre-processing" title=" image pre-processing"> image pre-processing</a>, <a href="https://publications.waset.org/search?q=deep%20learning" title=" deep learning"> deep learning</a>, <a href="https://publications.waset.org/search?q=CNN." title=" CNN. "> CNN. </a> </p> <a href="https://publications.waset.org/10011940/improving-the-performance-of-deep-learning-in-facial-emotion-recognition-with-image-sharpening" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/10011940/apa" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">APA</a> <a href="https://publications.waset.org/10011940/bibtex" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">BibTeX</a> <a href="https://publications.waset.org/10011940/chicago" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Chicago</a> <a href="https://publications.waset.org/10011940/endnote" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">EndNote</a> <a href="https://publications.waset.org/10011940/harvard" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Harvard</a> <a href="https://publications.waset.org/10011940/json" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">JSON</a> <a href="https://publications.waset.org/10011940/mla" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">MLA</a> <a href="https://publications.waset.org/10011940/ris" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">RIS</a> <a href="https://publications.waset.org/10011940/xml" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">XML</a> <a href="https://publications.waset.org/10011940/iso690" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">ISO 690</a> <a href="https://publications.waset.org/10011940.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">557</span> </span> </div> </div> <div class="card publication-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">138</span> RBF Based Face Recognition and Expression Analysis</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/search?q=Praseeda%20Lekshmi.V">Praseeda Lekshmi.V</a>, <a href="https://publications.waset.org/search?q=Dr.M.Sasikumar"> Dr.M.Sasikumar</a> </p> <p class="card-text"><strong>Abstract:</strong></p> Facial recognition and expression analysis is rapidly becoming an area of intense interest in computer science and humancomputer interaction design communities. The most expressive way humans display emotions is through facial expressions. In this paper skin and non-skin pixels were separated. Face regions were extracted from the detected skin regions. Facial expressions are analyzed from facial images by applying Gabor wavelet transform (GWT) and Discrete Cosine Transform (DCT) on face images. Radial Basis Function (RBF) Network is used to identify the person and to classify the facial expressions. Our method reliably works even with faces, which carry heavy expressions. <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/search?q=Face%20Recognition" title="Face Recognition">Face Recognition</a>, <a href="https://publications.waset.org/search?q=Radial%20Basis%20Function" title=" Radial Basis Function"> Radial Basis Function</a>, <a href="https://publications.waset.org/search?q=Gabor%20Wavelet%20Transform" title="Gabor Wavelet Transform">Gabor Wavelet Transform</a>, <a href="https://publications.waset.org/search?q=Discrete%20Cosine%20Transform" title=" Discrete Cosine Transform"> Discrete Cosine Transform</a> </p> <a href="https://publications.waset.org/9404/rbf-based-face-recognition-and-expression-analysis" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/9404/apa" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">APA</a> <a href="https://publications.waset.org/9404/bibtex" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">BibTeX</a> <a href="https://publications.waset.org/9404/chicago" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Chicago</a> <a href="https://publications.waset.org/9404/endnote" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">EndNote</a> <a href="https://publications.waset.org/9404/harvard" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Harvard</a> <a href="https://publications.waset.org/9404/json" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">JSON</a> <a href="https://publications.waset.org/9404/mla" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">MLA</a> <a href="https://publications.waset.org/9404/ris" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">RIS</a> <a href="https://publications.waset.org/9404/xml" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">XML</a> <a href="https://publications.waset.org/9404/iso690" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">ISO 690</a> <a href="https://publications.waset.org/9404.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">1606</span> </span> </div> </div> <div class="card publication-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">137</span> A Neural Network Based Facial Expression Analysis using Gabor Wavelets</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/search?q=Praseeda%20Lekshmi.V">Praseeda Lekshmi.V</a>, <a href="https://publications.waset.org/search?q=Dr.M.Sasikumar"> Dr.M.Sasikumar</a> </p> <p class="card-text"><strong>Abstract:</strong></p> Facial expression analysis is rapidly becoming an area of intense interest in computer science and human-computer interaction design communities. The most expressive way humans display emotions is through facial expressions. In this paper we present a method to analyze facial expression from images by applying Gabor wavelet transform (GWT) and Discrete Cosine Transform (DCT) on face images. Radial Basis Function (RBF) Network is used to classify the facial expressions. As a second stage, the images are preprocessed to enhance the edge details and non uniform down sampling is done to reduce the computational complexity and processing time. Our method reliably works even with faces, which carry heavy expressions. <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/search?q=Face%20Expression" title="Face Expression">Face Expression</a>, <a href="https://publications.waset.org/search?q=Radial%20Basis%20Function" title=" Radial Basis Function"> Radial Basis Function</a>, <a href="https://publications.waset.org/search?q=GaborWavelet%20Transform" title=" GaborWavelet Transform"> GaborWavelet Transform</a>, <a href="https://publications.waset.org/search?q=Human%20Computer%20Interaction." title=" Human Computer Interaction."> Human Computer Interaction.</a> </p> <a href="https://publications.waset.org/2741/a-neural-network-based-facial-expression-analysis-using-gabor-wavelets" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/2741/apa" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">APA</a> <a href="https://publications.waset.org/2741/bibtex" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">BibTeX</a> <a href="https://publications.waset.org/2741/chicago" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Chicago</a> <a href="https://publications.waset.org/2741/endnote" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">EndNote</a> <a href="https://publications.waset.org/2741/harvard" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Harvard</a> <a href="https://publications.waset.org/2741/json" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">JSON</a> <a href="https://publications.waset.org/2741/mla" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">MLA</a> <a href="https://publications.waset.org/2741/ris" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">RIS</a> <a href="https://publications.waset.org/2741/xml" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">XML</a> <a href="https://publications.waset.org/2741/iso690" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">ISO 690</a> <a href="https://publications.waset.org/2741.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">2117</span> </span> </div> </div> <div class="card publication-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">136</span> Optimized Facial Features-based Age Classification</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/search?q=Md.%20Zahangir%20Alom">Md. Zahangir Alom</a>, <a href="https://publications.waset.org/search?q=Mei-Lan%20Piao"> Mei-Lan Piao</a>, <a href="https://publications.waset.org/search?q=Md.%20Shariful%20Islam"> Md. Shariful Islam</a>, <a href="https://publications.waset.org/search?q=Nam%20Kim"> Nam Kim</a>, <a href="https://publications.waset.org/search?q=Jae-Hyeung%20Park"> Jae-Hyeung Park</a> </p> <p class="card-text"><strong>Abstract:</strong></p> The evaluation and measurement of human body dimensions are achieved by physical anthropometry. This research was conducted in view of the importance of anthropometric indices of the face in forensic medicine, surgery, and medical imaging. The main goal of this research is to optimization of facial feature point by establishing a mathematical relationship among facial features and used optimize feature points for age classification. Since selected facial feature points are located to the area of mouth, nose, eyes and eyebrow on facial images, all desire facial feature points are extracted accurately. According this proposes method; sixteen Euclidean distances are calculated from the eighteen selected facial feature points vertically as well as horizontally. The mathematical relationships among horizontal and vertical distances are established. Moreover, it is also discovered that distances of the facial feature follows a constant ratio due to age progression. The distances between the specified features points increase with respect the age progression of a human from his or her childhood but the ratio of the distances does not change (d = 1 .618 ) . Finally, according to the proposed mathematical relationship four independent feature distances related to eight feature points are selected from sixteen distances and eighteen feature point-s respectively. These four feature distances are used for classification of age using Support Vector Machine (SVM)-Sequential Minimal Optimization (SMO) algorithm and shown around 96 % accuracy. Experiment result shows the proposed system is effective and accurate for age classification. <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/search?q=3D%20Face%20Model" title="3D Face Model">3D Face Model</a>, <a href="https://publications.waset.org/search?q=Face%20Anthropometrics" title=" Face Anthropometrics"> Face Anthropometrics</a>, <a href="https://publications.waset.org/search?q=Facial%20Features%0AExtraction" title=" Facial Features Extraction"> Facial Features Extraction</a>, <a href="https://publications.waset.org/search?q=Feature%20distances" title=" Feature distances"> Feature distances</a>, <a href="https://publications.waset.org/search?q=SVM-SMO" title=" SVM-SMO"> SVM-SMO</a> </p> <a href="https://publications.waset.org/6689/optimized-facial-features-based-age-classification" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/6689/apa" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">APA</a> <a href="https://publications.waset.org/6689/bibtex" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">BibTeX</a> <a href="https://publications.waset.org/6689/chicago" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Chicago</a> <a href="https://publications.waset.org/6689/endnote" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">EndNote</a> <a href="https://publications.waset.org/6689/harvard" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Harvard</a> <a href="https://publications.waset.org/6689/json" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">JSON</a> <a href="https://publications.waset.org/6689/mla" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">MLA</a> <a href="https://publications.waset.org/6689/ris" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">RIS</a> <a href="https://publications.waset.org/6689/xml" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">XML</a> <a href="https://publications.waset.org/6689/iso690" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">ISO 690</a> <a href="https://publications.waset.org/6689.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">2058</span> </span> </div> </div> <div class="card publication-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">135</span> Animation of Objects on the Website by Application of CSS3 Language</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/search?q=Vladimir%20Simovic">Vladimir Simovic</a>, <a href="https://publications.waset.org/search?q=Matija%20Varga"> Matija Varga</a>, <a href="https://publications.waset.org/search?q=Robert%20Svetlacic"> Robert Svetlacic</a> </p> <p class="card-text"><strong>Abstract:</strong></p> <p>Scientific work analytically explores and demonstrates techniques that can animate objects and geometric characters using CSS3 language by applying proper formatting and positioning of elements. This paper presents examples of optimum application of the CSS3 descriptive language when generating general web animations (e.g., billiards and movement of geometric characters, etc.). The paper presents analytically, the optimal development and animation design with the frames within which the animated objects are. The originally developed content is based on the upgrading of existing CSS3 descriptive language animations with more complex syntax and project-oriented work. The purpose of the developed animations is to provide an overview of the interactive features of CSS3 descriptive language design for computer games and the animation of important analytical data based on the web view. It has been analytically demonstrated that CSS3 as a descriptive language allows inserting of various multimedia elements into websites for public and internal sites.</p> <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/search?q=Animation%20recording" title="Animation recording">Animation recording</a>, <a href="https://publications.waset.org/search?q=web%20page%20graphics" title=" web page graphics"> web page graphics</a>, <a href="https://publications.waset.org/search?q=HTML5%20forms" title=" HTML5 forms"> HTML5 forms</a>, <a href="https://publications.waset.org/search?q=Cascading%20Style%20Sheets%203%20-%20CSS3" title=" Cascading Style Sheets 3 - CSS3"> Cascading Style Sheets 3 - CSS3</a>, <a href="https://publications.waset.org/search?q=man-computer%20interaction" title=" man-computer interaction"> man-computer interaction</a>, <a href="https://publications.waset.org/search?q=KML%20animation%20presenting%20format" title=" KML animation presenting format"> KML animation presenting format</a>, <a href="https://publications.waset.org/search?q=GML" title=" GML"> GML</a>, <a href="https://publications.waset.org/search?q=Google%20Earth%20Professional." title=" Google Earth Professional."> Google Earth Professional.</a> </p> <a href="https://publications.waset.org/10008256/animation-of-objects-on-the-website-by-application-of-css3-language" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/10008256/apa" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">APA</a> <a href="https://publications.waset.org/10008256/bibtex" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">BibTeX</a> <a href="https://publications.waset.org/10008256/chicago" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Chicago</a> <a href="https://publications.waset.org/10008256/endnote" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">EndNote</a> <a href="https://publications.waset.org/10008256/harvard" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Harvard</a> <a href="https://publications.waset.org/10008256/json" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">JSON</a> <a href="https://publications.waset.org/10008256/mla" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">MLA</a> <a href="https://publications.waset.org/10008256/ris" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">RIS</a> <a href="https://publications.waset.org/10008256/xml" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">XML</a> <a href="https://publications.waset.org/10008256/iso690" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">ISO 690</a> <a href="https://publications.waset.org/10008256.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">821</span> </span> </div> </div> <div class="card publication-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">134</span> EMOES: Eye Motion and Ocular Expression Simulator</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/search?q=Nicoletta%20Adamo-Villani">Nicoletta Adamo-Villani</a>, <a href="https://publications.waset.org/search?q=Gerardo%20Beni"> Gerardo Beni</a>, <a href="https://publications.waset.org/search?q=Jeremy%20White"> Jeremy White</a> </p> <p class="card-text"><strong>Abstract:</strong></p> <p>We introduce, a new interactive 3D simulation system of ocular motion and expressions suitable for: (1) character animation applications to game design, film production, HCI (Human Computer Interface), conversational animated agents, and virtual reality; (2) medical applications (ophthalmic neurological and muscular pathologies: research and education); and (3) real time simulation of unconscious cognitive and emotional responses (for use, e.g., in psychological research). The system is comprised of: (1) a physiologically accurate parameterized 3D model of the eyes, eyelids, and eyebrow regions; and (2) a prototype device for realtime control of eye motions and expressions, including unconsciously produced expressions, for application as in (1), (2), and (3) above. The 3D eye simulation system, created using state-of-the-art computer animation technology and 'optimized' for use with an interactive and web deliverable platform, is, to our knowledge, the most advanced/realistic available so far for applications to character animation and medical pedagogy.</p> <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/search?q=3D%20animation" title="3D animation">3D animation</a>, <a href="https://publications.waset.org/search?q=HCI" title=" HCI"> HCI</a>, <a href="https://publications.waset.org/search?q=medical%20simulation" title=" medical simulation"> medical simulation</a>, <a href="https://publications.waset.org/search?q=ocularmotion%20and%20expression." title=" ocularmotion and expression."> ocularmotion and expression.</a> </p> <a href="https://publications.waset.org/13362/emoes-eye-motion-and-ocular-expression-simulator" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/13362/apa" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">APA</a> <a href="https://publications.waset.org/13362/bibtex" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">BibTeX</a> <a href="https://publications.waset.org/13362/chicago" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Chicago</a> <a href="https://publications.waset.org/13362/endnote" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">EndNote</a> <a href="https://publications.waset.org/13362/harvard" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Harvard</a> <a href="https://publications.waset.org/13362/json" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">JSON</a> <a href="https://publications.waset.org/13362/mla" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">MLA</a> <a href="https://publications.waset.org/13362/ris" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">RIS</a> <a href="https://publications.waset.org/13362/xml" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">XML</a> <a href="https://publications.waset.org/13362/iso690" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">ISO 690</a> <a href="https://publications.waset.org/13362.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">1990</span> </span> </div> </div> <div class="card publication-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">133</span> 3D Rendering of American Sign Language Finger-Spelling: A Comparative Study of Two Animation Techniques</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/search?q=Nicoletta%20Adamo-Villani">Nicoletta Adamo-Villani</a> </p> <p class="card-text"><strong>Abstract:</strong></p> In this paper we report a study aimed at determining the most effective animation technique for representing ASL (American Sign Language) finger-spelling. Specifically, in the study we compare two commonly used 3D computer animation methods (keyframe animation and motion capture) in order to ascertain which technique produces the most 'accurate', 'readable', and 'close to actual signing' (i.e. realistic) rendering of ASL finger-spelling. To accomplish this goal we have developed 20 animated clips of fingerspelled words and we have designed an experiment consisting of a web survey with rating questions. 71 subjects ages 19-45 participated in the study. Results showed that recognition of the words was correlated with the method used to animate the signs. In particular, keyframe technique produced the most accurate representation of the signs (i.e., participants were more likely to identify the words correctly in keyframed sequences rather than in motion captured ones). Further, findings showed that the animation method had an effect on the reported scores for readability and closeness to actual signing; the estimated marginal mean readability and closeness was greater for keyframed signs than for motion captured signs. To our knowledge, this is the first study aimed at measuring and comparing accuracy, readability and realism of ASL animations produced with different techniques. <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/search?q=3D%20Animation" title="3D Animation">3D Animation</a>, <a href="https://publications.waset.org/search?q=American%20Sign%20Language" title=" American Sign Language"> American Sign Language</a>, <a href="https://publications.waset.org/search?q=DeafEducation" title=" DeafEducation"> DeafEducation</a>, <a href="https://publications.waset.org/search?q=Motion%20Capture." title=" Motion Capture."> Motion Capture.</a> </p> <a href="https://publications.waset.org/11425/3d-rendering-of-american-sign-language-finger-spelling-a-comparative-study-of-two-animation-techniques" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/11425/apa" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">APA</a> <a href="https://publications.waset.org/11425/bibtex" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">BibTeX</a> <a href="https://publications.waset.org/11425/chicago" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Chicago</a> <a href="https://publications.waset.org/11425/endnote" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">EndNote</a> <a href="https://publications.waset.org/11425/harvard" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Harvard</a> <a href="https://publications.waset.org/11425/json" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">JSON</a> <a href="https://publications.waset.org/11425/mla" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">MLA</a> <a href="https://publications.waset.org/11425/ris" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">RIS</a> <a href="https://publications.waset.org/11425/xml" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">XML</a> <a href="https://publications.waset.org/11425/iso690" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">ISO 690</a> <a href="https://publications.waset.org/11425.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">2013</span> </span> </div> </div> <div class="card publication-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">132</span> Dynamic Visualization on Student's Performance, Retention and Transfer of Procedural Learning</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/search?q=Fauzy%20M.%20Wan">Fauzy M. Wan</a>, <a href="https://publications.waset.org/search?q=Reem%20S.A.%20Baragash"> Reem S.A. Baragash</a> </p> <p class="card-text"><strong>Abstract:</strong></p> This study examined the effects of two dynamic visualizations on 60 Malaysian primary school student-s performance (time on task), retention and transference. The independent variables in this study were the two dynamic visualizations, the video and the animated instructions. The dependent variables were the gain score of performance, retention and transference. The results showed that the students in the animation group significantly outperformed the students in the video group in retention. There were no significant differences in terms of gain scores in the performance and transference among the animation and the video groups, although the scores were slightly higher in the animation group compared to the video group. The conclusion of this study is that the animation visualization is superior compared to the video in the retention for a procedural task. <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/search?q=Dynamic%20visualization" title="Dynamic visualization">Dynamic visualization</a>, <a href="https://publications.waset.org/search?q=Procedural%20Task" title=" Procedural Task"> Procedural Task</a>, <a href="https://publications.waset.org/search?q=Retention" title=" Retention"> Retention</a>, <a href="https://publications.waset.org/search?q=Transference" title=" Transference"> Transference</a> </p> <a href="https://publications.waset.org/10524/dynamic-visualization-on-students-performance-retention-and-transfer-of-procedural-learning" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/10524/apa" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">APA</a> <a href="https://publications.waset.org/10524/bibtex" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">BibTeX</a> <a href="https://publications.waset.org/10524/chicago" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Chicago</a> <a href="https://publications.waset.org/10524/endnote" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">EndNote</a> <a href="https://publications.waset.org/10524/harvard" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Harvard</a> <a href="https://publications.waset.org/10524/json" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">JSON</a> <a href="https://publications.waset.org/10524/mla" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">MLA</a> <a href="https://publications.waset.org/10524/ris" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">RIS</a> <a href="https://publications.waset.org/10524/xml" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">XML</a> <a href="https://publications.waset.org/10524/iso690" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">ISO 690</a> <a href="https://publications.waset.org/10524.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">1440</span> </span> </div> </div> <div class="card publication-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">131</span> 3D Simulator of Ocular Motion and Expression</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/search?q=Nicoletta%20Adamo-Villani">Nicoletta Adamo-Villani</a>, <a href="https://publications.waset.org/search?q=Gerardo%20Beni"> Gerardo Beni</a>, <a href="https://publications.waset.org/search?q=Jeremy%20White"> Jeremy White</a> </p> <p class="card-text"><strong>Abstract:</strong></p> <p>We introduce a new interactive 3D simulator of ocular motion and expressions suitable for: (1) character animation applications to game design, film production, HCI (Human Computer Interface), conversational animated agents, and virtual reality; (2) medical applications (ophthalmic neurological and muscular pathologies: research and education); and (3) real time simulation of unconscious cognitive and emotional responses (for use, e.g., in psychological research). Using state-of-the-art computer animation technology we have modeled and rigged a physiologically accurate 3D model of the eyes, eyelids, and eyebrow regions and we have 'optimized' it for use with an interactive and web deliverable platform. In addition, we have realized a prototype device for realtime control of eye motions and expressions, including unconsciously produced expressions, for application as in (1), (2), and (3) above. The 3D simulator of eye motion and ocular expression is, to our knowledge, the most advanced/realistic available so far for applications in character animation and medical pedagogy.</p> <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/search?q=3D%20animation" title="3D animation">3D animation</a>, <a href="https://publications.waset.org/search?q=HCI" title=" HCI"> HCI</a>, <a href="https://publications.waset.org/search?q=medical%20simulation" title=" medical simulation"> medical simulation</a>, <a href="https://publications.waset.org/search?q=ocularmotion%20and%20expression." title=" ocularmotion and expression."> ocularmotion and expression.</a> </p> <a href="https://publications.waset.org/6787/3d-simulator-of-ocular-motion-and-expression" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/6787/apa" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">APA</a> <a href="https://publications.waset.org/6787/bibtex" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">BibTeX</a> <a href="https://publications.waset.org/6787/chicago" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Chicago</a> <a href="https://publications.waset.org/6787/endnote" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">EndNote</a> <a href="https://publications.waset.org/6787/harvard" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Harvard</a> <a href="https://publications.waset.org/6787/json" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">JSON</a> <a href="https://publications.waset.org/6787/mla" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">MLA</a> <a href="https://publications.waset.org/6787/ris" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">RIS</a> <a href="https://publications.waset.org/6787/xml" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">XML</a> <a href="https://publications.waset.org/6787/iso690" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">ISO 690</a> <a href="https://publications.waset.org/6787.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">2243</span> </span> </div> </div> <div class="card publication-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">130</span> PEIBM- Perceiving Emotions using an Intelligent Behavioral Model </h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/search?q=Maryam%20Humayun">Maryam Humayun</a>, <a href="https://publications.waset.org/search?q=Zafar%20I.%20Malik"> Zafar I. Malik</a>, <a href="https://publications.waset.org/search?q=Shaukat%20Ali"> Shaukat Ali </a> </p> <p class="card-text"><strong>Abstract:</strong></p> <p>Computer animation is a widely adopted technique used to specify the movement of various objects on screen. The key issue of this technique is the specification of motion. Motion Control Methods are such methods which are used to specify the actions of objects. This paper discusses the various types of motion control methods with special focus on behavioral animation. A behavioral model is also proposed which takes into account the emotions and perceptions of an actor which in turn generate its behavior. This model makes use of an expert system to generate tasks for the actors which specify the actions to be performed in the virtual environment.</p> <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/search?q=Behavioral%20animation" title="Behavioral animation">Behavioral animation</a>, <a href="https://publications.waset.org/search?q=emotion" title=" emotion"> emotion</a>, <a href="https://publications.waset.org/search?q=expert%20system" title=" expert system"> expert system</a>, <a href="https://publications.waset.org/search?q=perception." title=" perception."> perception.</a> </p> <a href="https://publications.waset.org/6785/peibm-perceiving-emotions-using-an-intelligent-behavioral-model" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/6785/apa" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">APA</a> <a href="https://publications.waset.org/6785/bibtex" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">BibTeX</a> <a href="https://publications.waset.org/6785/chicago" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Chicago</a> <a href="https://publications.waset.org/6785/endnote" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">EndNote</a> <a href="https://publications.waset.org/6785/harvard" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Harvard</a> <a href="https://publications.waset.org/6785/json" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">JSON</a> <a href="https://publications.waset.org/6785/mla" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">MLA</a> <a href="https://publications.waset.org/6785/ris" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">RIS</a> <a href="https://publications.waset.org/6785/xml" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">XML</a> <a href="https://publications.waset.org/6785/iso690" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">ISO 690</a> <a href="https://publications.waset.org/6785.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">1405</span> </span> </div> </div> <div class="card publication-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">129</span> Deep-Learning Based Approach to Facial Emotion Recognition Through Convolutional Neural Network</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/search?q=Nouha%20Khediri">Nouha Khediri</a>, <a href="https://publications.waset.org/search?q=Mohammed%20Ben%20Ammar"> Mohammed Ben Ammar</a>, <a href="https://publications.waset.org/search?q=Monji%20Kherallah"> Monji Kherallah</a> </p> <p class="card-text"><strong>Abstract:</strong></p> <p>Recently, facial emotion recognition (FER) has become increasingly essential to understand the state of the human mind. However, accurately classifying emotion from the face is a challenging task. In this paper, we present a facial emotion recognition approach named CV-FER benefiting from deep learning, especially CNN and VGG16. First, the data are pre-processed with data cleaning and data rotation. Then, we augment the data and proceed to our FER model, which contains five convolutions layers and five pooling layers. Finally, a softmax classifier is used in the output layer to recognize emotions. Based on the above contents, this paper reviews the works of facial emotion recognition based on deep learning. Experiments show that our model outperforms the other methods using the same FER2013 database and yields a recognition rate of 92%. We also put forward some suggestions for future work. </p> <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/search?q=CNN" title="CNN">CNN</a>, <a href="https://publications.waset.org/search?q=deep-learning" title=" deep-learning"> deep-learning</a>, <a href="https://publications.waset.org/search?q=facial%20emotion%20recognition" title=" facial emotion recognition"> facial emotion recognition</a>, <a href="https://publications.waset.org/search?q=machine%20learning." title=" machine learning."> machine learning.</a> </p> <a href="https://publications.waset.org/10012968/deep-learning-based-approach-to-facial-emotion-recognition-through-convolutional-neural-network" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/10012968/apa" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">APA</a> <a href="https://publications.waset.org/10012968/bibtex" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">BibTeX</a> <a href="https://publications.waset.org/10012968/chicago" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Chicago</a> <a href="https://publications.waset.org/10012968/endnote" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">EndNote</a> <a href="https://publications.waset.org/10012968/harvard" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Harvard</a> <a href="https://publications.waset.org/10012968/json" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">JSON</a> <a href="https://publications.waset.org/10012968/mla" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">MLA</a> <a href="https://publications.waset.org/10012968/ris" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">RIS</a> <a href="https://publications.waset.org/10012968/xml" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">XML</a> <a href="https://publications.waset.org/10012968/iso690" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">ISO 690</a> <a href="https://publications.waset.org/10012968.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">738</span> </span> </div> </div> <div class="card publication-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">128</span> Emotion Classification using Adaptive SVMs</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/search?q=P.%20Visutsak">P. Visutsak</a> </p> <p class="card-text"><strong>Abstract:</strong></p> The study of the interaction between humans and computers has been emerging during the last few years. This interaction will be more powerful if computers are able to perceive and respond to human nonverbal communication such as emotions. In this study, we present the image-based approach to emotion classification through lower facial expression. We employ a set of feature points in the lower face image according to the particular face model used and consider their motion across each emotive expression of images. The vector of displacements of all feature points input to the Adaptive Support Vector Machines (A-SVMs) classifier that classify it into seven basic emotions scheme, namely neutral, angry, disgust, fear, happy, sad and surprise. The system was tested on the Japanese Female Facial Expression (JAFFE) dataset of frontal view facial expressions [7]. Our experiments on emotion classification through lower facial expressions demonstrate the robustness of Adaptive SVM classifier and verify the high efficiency of our approach. <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/search?q=emotion%20classification" title="emotion classification">emotion classification</a>, <a href="https://publications.waset.org/search?q=facial%20expression" title=" facial expression"> facial expression</a>, <a href="https://publications.waset.org/search?q=adaptive%0Asupport%20vector%20machines" title=" adaptive support vector machines"> adaptive support vector machines</a>, <a href="https://publications.waset.org/search?q=facial%20expression%20classifier." title=" facial expression classifier."> facial expression classifier.</a> </p> <a href="https://publications.waset.org/8828/emotion-classification-using-adaptive-svms" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/8828/apa" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">APA</a> <a href="https://publications.waset.org/8828/bibtex" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">BibTeX</a> <a href="https://publications.waset.org/8828/chicago" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Chicago</a> <a href="https://publications.waset.org/8828/endnote" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">EndNote</a> <a href="https://publications.waset.org/8828/harvard" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Harvard</a> <a href="https://publications.waset.org/8828/json" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">JSON</a> <a href="https://publications.waset.org/8828/mla" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">MLA</a> <a href="https://publications.waset.org/8828/ris" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">RIS</a> <a href="https://publications.waset.org/8828/xml" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">XML</a> <a href="https://publications.waset.org/8828/iso690" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">ISO 690</a> <a href="https://publications.waset.org/8828.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">2236</span> </span> </div> </div> <div class="card publication-listing mb-3 mt-3"> <h5 class="card-header" style="font-size:.9rem"><span class="badge badge-info">127</span> Innovation Policy and Development of Creative Industries: Case Study of Lithuanian Animation Industry</h5> <div class="card-body"> <p class="card-text"><strong>Authors:</strong> <a href="https://publications.waset.org/search?q=Tomas%20Mitkus">Tomas Mitkus</a>, <a href="https://publications.waset.org/search?q=Vaida%20Nedzinskait%C4%97-Mitk%C4%97"> Vaida Nedzinskait臈-Mitk臈 </a> </p> <p class="card-text"><strong>Abstract:</strong></p> <p>The objective of this study is to identify and explore how adequate is modern innovation support mechanism to developed creative industries. We argue that current development and support strategy for creative industries, although acknowledge high correlation between innovation and creativity, do not seek to improve conditions to promote systematic innovation development in the creative sector. Using the Lithuanian animation industry as a case study, this paper will examine innovation contribution to creativity and, for that matter, the competitiveness of animation enterprises. This paper proposes insights that contribute to theoretical and practical discussions on how creative profile companies build national and international competitiveness through innovations. The conclusions suggest that development of creative industries could greatly benefit if policymakers would implement tools that would encourage creative profile enterprises to invest in to development of innovation at a constant rate.</p> <p class="card-text"><strong>Keywords:</strong> <a href="https://publications.waset.org/search?q=Creative%20industries" title="Creative industries">Creative industries</a>, <a href="https://publications.waset.org/search?q=animation" title=" animation"> animation</a>, <a href="https://publications.waset.org/search?q=innovation" title=" innovation"> innovation</a>, <a href="https://publications.waset.org/search?q=innovation%20policy" title=" innovation policy"> innovation policy</a>, <a href="https://publications.waset.org/search?q=management." title=" management. "> management. </a> </p> <a href="https://publications.waset.org/10008021/innovation-policy-and-development-of-creative-industries-case-study-of-lithuanian-animation-industry" class="btn btn-primary btn-sm">Procedia</a> <a href="https://publications.waset.org/10008021/apa" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">APA</a> <a href="https://publications.waset.org/10008021/bibtex" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">BibTeX</a> <a href="https://publications.waset.org/10008021/chicago" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Chicago</a> <a href="https://publications.waset.org/10008021/endnote" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">EndNote</a> <a href="https://publications.waset.org/10008021/harvard" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">Harvard</a> <a href="https://publications.waset.org/10008021/json" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">JSON</a> <a href="https://publications.waset.org/10008021/mla" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">MLA</a> <a href="https://publications.waset.org/10008021/ris" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">RIS</a> <a href="https://publications.waset.org/10008021/xml" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">XML</a> <a href="https://publications.waset.org/10008021/iso690" target="_blank" rel="nofollow" class="btn btn-primary btn-sm">ISO 690</a> <a href="https://publications.waset.org/10008021.pdf" target="_blank" class="btn btn-primary btn-sm">PDF</a> <span class="bg-info text-light px-1 py-1 float-right rounded"> Downloads <span class="badge badge-light">1044</span> </span> </div> </div> <ul class="pagination"> <li class="page-item disabled"><span class="page-link">‹</span></li> <li class="page-item active"><span class="page-link">1</span></li> <li class="page-item"><a class="page-link" href="https://publications.waset.org/search?q=facial%20animation&page=2">2</a></li> <li class="page-item"><a class="page-link" href="https://publications.waset.org/search?q=facial%20animation&page=3">3</a></li> <li class="page-item"><a class="page-link" href="https://publications.waset.org/search?q=facial%20animation&page=4">4</a></li> <li class="page-item"><a class="page-link" href="https://publications.waset.org/search?q=facial%20animation&page=5">5</a></li> <li class="page-item"><a class="page-link" href="https://publications.waset.org/search?q=facial%20animation&page=6">6</a></li> <li class="page-item"><a class="page-link" href="https://publications.waset.org/search?q=facial%20animation&page=2" rel="next">›</a></li> </ul> </div> </main> <footer> <div id="infolinks" class="pt-3 pb-2"> <div class="container"> <div style="background-color:#f5f5f5;" class="p-3"> <div class="row"> <div class="col-md-2"> <ul class="list-unstyled"> About <li><a href="https://waset.org/page/support">About Us</a></li> <li><a href="https://waset.org/page/support#legal-information">Legal</a></li> <li><a target="_blank" rel="nofollow" href="https://publications.waset.org/static/files/WASET-16th-foundational-anniversary.pdf">WASET celebrates its 16th foundational anniversary</a></li> </ul> </div> <div class="col-md-2"> <ul class="list-unstyled"> Account <li><a href="https://waset.org/profile">My Account</a></li> </ul> </div> <div class="col-md-2"> <ul class="list-unstyled"> Explore <li><a href="https://waset.org/disciplines">Disciplines</a></li> <li><a href="https://waset.org/conferences">Conferences</a></li> <li><a href="https://waset.org/conference-programs">Conference Program</a></li> <li><a href="https://waset.org/committees">Committees</a></li> <li><a href="https://publications.waset.org">Publications</a></li> </ul> </div> <div class="col-md-2"> <ul class="list-unstyled"> Research <li><a href="https://publications.waset.org/abstracts">Abstracts</a></li> <li><a href="https://publications.waset.org">Periodicals</a></li> <li><a href="https://publications.waset.org/archive">Archive</a></li> </ul> </div> <div class="col-md-2"> <ul class="list-unstyled"> Open Science <li><a target="_blank" rel="nofollow" href="https://publications.waset.org/static/files/Open-Science-Philosophy.pdf">Open Science Philosophy</a></li> <li><a target="_blank" rel="nofollow" href="https://publications.waset.org/static/files/Open-Science-Award.pdf">Open Science Award</a></li> <li><a target="_blank" rel="nofollow" href="https://publications.waset.org/static/files/Open-Society-Open-Science-and-Open-Innovation.pdf">Open Innovation</a></li> <li><a target="_blank" rel="nofollow" href="https://publications.waset.org/static/files/Postdoctoral-Fellowship-Award.pdf">Postdoctoral Fellowship Award</a></li> <li><a target="_blank" rel="nofollow" href="https://publications.waset.org/static/files/Scholarly-Research-Review.pdf">Scholarly Research Review</a></li> </ul> </div> <div class="col-md-2"> <ul class="list-unstyled"> Support <li><a href="https://waset.org/page/support">Support</a></li> <li><a href="https://waset.org/profile/messages/create">Contact Us</a></li> <li><a href="https://waset.org/profile/messages/create">Report Abuse</a></li> </ul> </div> </div> </div> </div> </div> <div class="container text-center"> <hr style="margin-top:0;margin-bottom:.3rem;"> <a href="https://creativecommons.org/licenses/by/4.0/" target="_blank" class="text-muted small">Creative Commons Attribution 4.0 International License</a> <div id="copy" class="mt-2">© 2025 World Academy of Science, Engineering and Technology</div> </div> </footer> <a href="javascript:" id="return-to-top"><i class="fas fa-arrow-up"></i></a> <div class="modal" id="modal-template"> <div class="modal-dialog"> <div class="modal-content"> <div class="row m-0 mt-1"> <div class="col-md-12"> <button type="button" class="close" data-dismiss="modal" aria-label="Close"><span aria-hidden="true">×</span></button> </div> </div> <div class="modal-body"></div> </div> </div> </div> <script src="https://cdn.waset.org/static/plugins/jquery-3.3.1.min.js"></script> <script src="https://cdn.waset.org/static/plugins/bootstrap-4.2.1/js/bootstrap.bundle.min.js"></script> <script src="https://cdn.waset.org/static/js/site.js?v=150220211556"></script> <script> jQuery(document).ready(function() { /*jQuery.get("https://publications.waset.org/xhr/user-menu", function (response) { jQuery('#mainNavMenu').append(response); });*/ jQuery.get({ url: "https://publications.waset.org/xhr/user-menu", cache: false }).then(function(response){ jQuery('#mainNavMenu').append(response); }); }); </script> </body> </html>