CINXE.COM
Olivier Rosec - Academia.edu
<!DOCTYPE html> <html lang="en" xmlns:fb="http://www.facebook.com/2008/fbml" class="wf-loading"> <head prefix="og: https://ogp.me/ns# fb: https://ogp.me/ns/fb# academia: https://ogp.me/ns/fb/academia#"> <meta charset="utf-8"> <meta name=viewport content="width=device-width, initial-scale=1"> <meta rel="search" type="application/opensearchdescription+xml" href="/open_search.xml" title="Academia.edu"> <title>Olivier Rosec - Academia.edu</title> <!-- _ _ _ | | (_) | | __ _ ___ __ _ __| | ___ _ __ ___ _ __ _ ___ __| |_ _ / _` |/ __/ _` |/ _` |/ _ \ '_ ` _ \| |/ _` | / _ \/ _` | | | | | (_| | (_| (_| | (_| | __/ | | | | | | (_| || __/ (_| | |_| | \__,_|\___\__,_|\__,_|\___|_| |_| |_|_|\__,_(_)___|\__,_|\__,_| We're hiring! See https://www.academia.edu/hiring --> <link href="//a.academia-assets.com/images/favicons/favicon-production.ico" rel="shortcut icon" type="image/vnd.microsoft.icon"> <link rel="apple-touch-icon" sizes="57x57" href="//a.academia-assets.com/images/favicons/apple-touch-icon-57x57.png"> <link rel="apple-touch-icon" sizes="60x60" href="//a.academia-assets.com/images/favicons/apple-touch-icon-60x60.png"> <link rel="apple-touch-icon" sizes="72x72" href="//a.academia-assets.com/images/favicons/apple-touch-icon-72x72.png"> <link rel="apple-touch-icon" sizes="76x76" href="//a.academia-assets.com/images/favicons/apple-touch-icon-76x76.png"> <link rel="apple-touch-icon" sizes="114x114" href="//a.academia-assets.com/images/favicons/apple-touch-icon-114x114.png"> <link rel="apple-touch-icon" sizes="120x120" href="//a.academia-assets.com/images/favicons/apple-touch-icon-120x120.png"> <link rel="apple-touch-icon" sizes="144x144" href="//a.academia-assets.com/images/favicons/apple-touch-icon-144x144.png"> <link rel="apple-touch-icon" sizes="152x152" href="//a.academia-assets.com/images/favicons/apple-touch-icon-152x152.png"> <link rel="apple-touch-icon" sizes="180x180" href="//a.academia-assets.com/images/favicons/apple-touch-icon-180x180.png"> <link rel="icon" 
type="image/png" href="//a.academia-assets.com/images/favicons/favicon-32x32.png" sizes="32x32"> <link rel="icon" type="image/png" href="//a.academia-assets.com/images/favicons/favicon-194x194.png" sizes="194x194"> <link rel="icon" type="image/png" href="//a.academia-assets.com/images/favicons/favicon-96x96.png" sizes="96x96"> <link rel="icon" type="image/png" href="//a.academia-assets.com/images/favicons/android-chrome-192x192.png" sizes="192x192"> <link rel="icon" type="image/png" href="//a.academia-assets.com/images/favicons/favicon-16x16.png" sizes="16x16"> <link rel="manifest" href="//a.academia-assets.com/images/favicons/manifest.json"> <meta name="msapplication-TileColor" content="#2b5797"> <meta name="msapplication-TileImage" content="//a.academia-assets.com/images/favicons/mstile-144x144.png"> <meta name="theme-color" content="#ffffff"> <script> window.performance && window.performance.measure && window.performance.measure("Time To First Byte", "requestStart", "responseStart"); </script> <script> (function() { if (!window.URLSearchParams || !window.history || !window.history.replaceState) { return; } var searchParams = new URLSearchParams(window.location.search); var paramsToDelete = [ 'fs', 'sm', 'swp', 'iid', 'nbs', 'rcc', // related content category 'rcpos', // related content carousel position 'rcpg', // related carousel page 'rchid', // related content hit id 'f_ri', // research interest id, for SEO tracking 'f_fri', // featured research interest, for SEO tracking (param key without value) 'f_rid', // from research interest directory for SEO tracking 'f_loswp', // from research interest pills on LOSWP sidebar for SEO tracking 'rhid', // referrring hit id ]; if (paramsToDelete.every((key) => searchParams.get(key) === null)) { return; } paramsToDelete.forEach((key) => { searchParams.delete(key); }); var cleanUrl = new URL(window.location.href); cleanUrl.search = searchParams.toString(); history.replaceState({}, document.title, cleanUrl); })(); </script> 
<script async src="https://www.googletagmanager.com/gtag/js?id=G-5VKX33P2DS"></script> <script> window.dataLayer = window.dataLayer || []; function gtag(){dataLayer.push(arguments);} gtag('js', new Date()); gtag('config', 'G-5VKX33P2DS', { cookie_domain: 'academia.edu', send_page_view: false, }); gtag('event', 'page_view', { 'controller': "profiles/works", 'action': "summary", 'controller_action': 'profiles/works#summary', 'logged_in': 'false', 'edge': 'unknown', // Send nil if there is no A/B test bucket, in case some records get logged // with missing data - that way we can distinguish between the two cases. // ab_test_bucket should be of the form <ab_test_name>:<bucket> 'ab_test_bucket': null, }) </script> <script type="text/javascript"> window.sendUserTiming = function(timingName) { if (!(window.performance && window.performance.measure)) return; var entries = window.performance.getEntriesByName(timingName, "measure"); if (entries.length !== 1) return; var timingValue = Math.round(entries[0].duration); gtag('event', 'timing_complete', { name: timingName, value: timingValue, event_category: 'User-centric', }); }; window.sendUserTiming("Time To First Byte"); </script> <meta name="csrf-param" content="authenticity_token" /> <meta name="csrf-token" content="vQoK/9vnSuml/8bsV1IO8KgEJvIU/CbKQwusIXUz/GjQiRtjdnXF4qD1NyOh//WExGfe2l6UVE5OcA/54xzmwQ==" /> <link rel="stylesheet" media="all" href="//a.academia-assets.com/assets/wow-77f7b87cb1583fc59aa8f94756ebfe913345937eb932042b4077563bebb5fb4b.css" /><link rel="stylesheet" media="all" href="//a.academia-assets.com/assets/social/home-1c712297ae3ac71207193b1bae0ecf1aae125886850f62c9c0139dd867630797.css" /><link rel="stylesheet" media="all" href="//a.academia-assets.com/assets/design_system/heading-b2b823dd904da60a48fd1bfa1defd840610c2ff414d3f39ed3af46277ab8df3b.css" /><link rel="stylesheet" media="all" 
href="//a.academia-assets.com/assets/design_system/button-3cea6e0ad4715ed965c49bfb15dedfc632787b32ff6d8c3a474182b231146ab7.css" /><link crossorigin="" href="https://fonts.gstatic.com/" rel="preconnect" /><link href="https://fonts.googleapis.com/css2?family=DM+Sans:ital,opsz,wght@0,9..40,100..1000;1,9..40,100..1000&family=Gupter:wght@400;500;700&family=IBM+Plex+Mono:wght@300;400&family=Material+Symbols+Outlined:opsz,wght,FILL,GRAD@20,400,0,0&display=swap" rel="stylesheet" /><link rel="stylesheet" media="all" href="//a.academia-assets.com/assets/design_system/common-10fa40af19d25203774df2d4a03b9b5771b45109c2304968038e88a81d1215c5.css" /> <meta name="author" content="olivier rosec" /> <meta name="description" content="Olivier Rosec: 17 Followers, 9 Following, 70 Research papers. Research interests: Musical Instruments, Harmonic Analysis, and Speech Processing." /> <meta name="google-site-verification" content="bKJMBZA7E43xhDOopFZkssMMkBRjvYERV-NaN4R6mrs" /> <script> var $controller_name = 'works'; var $action_name = "summary"; var $rails_env = 'production'; var $app_rev = '49879c2402910372f4abc62630a427bbe033d190'; var $domain = 'academia.edu'; var $app_host = "academia.edu"; var $asset_host = "academia-assets.com"; var $start_time = new Date().getTime(); var $recaptcha_key = "6LdxlRMTAAAAADnu_zyLhLg0YF9uACwz78shpjJB"; var $recaptcha_invisible_key = "6Lf3KHUUAAAAACggoMpmGJdQDtiyrjVlvGJ6BbAj"; var $disableClientRecordHit = false; </script> <script> window.Aedu = { hit_data: null }; window.Aedu.SiteStats = {"premium_universities_count":15276,"monthly_visitors":"113 million","monthly_visitor_count":113468711,"monthly_visitor_count_in_millions":113,"user_count":277163310,"paper_count":55203019,"paper_count_in_millions":55,"page_count":432000000,"page_count_in_millions":432,"pdf_count":16500000,"pdf_count_in_millions":16}; window.Aedu.serverRenderTime = new Date(1732437520000); window.Aedu.timeDifference = new Date().getTime() - 1732437520000; window.Aedu.isUsingCssV1 = 
false; window.Aedu.enableLocalization = true; window.Aedu.activateFullstory = false; window.Aedu.serviceAvailability = { status: {"attention_db":"on","bibliography_db":"on","contacts_db":"on","email_db":"on","indexability_db":"on","mentions_db":"on","news_db":"on","notifications_db":"on","offsite_mentions_db":"on","redshift":"on","redshift_exports_db":"on","related_works_db":"on","ring_db":"on","user_tests_db":"on"}, serviceEnabled: function(service) { return this.status[service] === "on"; }, readEnabled: function(service) { return this.serviceEnabled(service) || this.status[service] === "read_only"; }, }; window.Aedu.viewApmTrace = function() { // Check if x-apm-trace-id meta tag is set, and open the trace in APM // in a new window if it is. var apmTraceId = document.head.querySelector('meta[name="x-apm-trace-id"]'); if (apmTraceId) { var traceId = apmTraceId.content; // Use trace ID to construct URL, an example URL looks like: // https://app.datadoghq.com/apm/traces?query=trace_id%31298410148923562634 var apmUrl = 'https://app.datadoghq.com/apm/traces?query=trace_id%3A' + traceId; window.open(apmUrl, '_blank'); } }; </script> <!--[if lt IE 9]> <script src="//cdnjs.cloudflare.com/ajax/libs/html5shiv/3.7.2/html5shiv.min.js"></script> <![endif]--> <link href="https://fonts.googleapis.com/css?family=Roboto:100,100i,300,300i,400,400i,500,500i,700,700i,900,900i" rel="stylesheet"> <link href="//maxcdn.bootstrapcdn.com/font-awesome/4.3.0/css/font-awesome.min.css" rel="stylesheet"> <link rel="stylesheet" media="all" href="//a.academia-assets.com/assets/libraries-a9675dcb01ec4ef6aa807ba772c7a5a00c1820d3ff661c1038a20f80d06bb4e4.css" /> <link rel="stylesheet" media="all" href="//a.academia-assets.com/assets/academia-296162c7af6fd81dcdd76f1a94f1fad04fb5f647401337d136fe8b68742170b1.css" /> <link rel="stylesheet" media="all" href="//a.academia-assets.com/assets/design_system_legacy-056a9113b9a0f5343d013b29ee1929d5a18be35fdcdceb616600b4db8bd20054.css" /> <script 
src="//a.academia-assets.com/assets/webpack_bundles/runtime-bundle-005434038af4252ca37c527588411a3d6a0eabb5f727fac83f8bbe7fd88d93bb.js"></script> <script src="//a.academia-assets.com/assets/webpack_bundles/webpack_libraries_and_infrequently_changed.wjs-bundle-8d53a22151f33ab413d88fa1c02f979c3f8706d470fc1bced09852c72a9f3454.js"></script> <script src="//a.academia-assets.com/assets/webpack_bundles/core_webpack.wjs-bundle-f8fe82512740391f81c9e8cc48220144024b425b359b08194e316f4de070b9e8.js"></script> <script src="//a.academia-assets.com/assets/webpack_bundles/sentry.wjs-bundle-5fe03fddca915c8ba0f7edbe64c194308e8ce5abaed7bffe1255ff37549c4808.js"></script> <script> jade = window.jade || {}; jade.helpers = window.$h; jade._ = window._; </script> <!-- Google Tag Manager --> <script id="tag-manager-head-root">(function(w,d,s,l,i){w[l]=w[l]||[];w[l].push({'gtm.start': new Date().getTime(),event:'gtm.js'});var f=d.getElementsByTagName(s)[0], j=d.createElement(s),dl=l!='dataLayer'?'&l='+l:'';j.async=true;j.src= 'https://www.googletagmanager.com/gtm.js?id='+i+dl;f.parentNode.insertBefore(j,f); })(window,document,'script','dataLayer_old','GTM-5G9JF7Z');</script> <!-- End Google Tag Manager --> <script> window.gptadslots = []; window.googletag = window.googletag || {}; window.googletag.cmd = window.googletag.cmd || []; </script> <script type="text/javascript"> // TODO(jacob): This should be defined, may be rare load order problem. // Checking if null is just a quick fix, will default to en if unset. // Better fix is to run this immedietely after I18n is set. 
if (window.I18n != null) { I18n.defaultLocale = "en"; I18n.locale = "en"; I18n.fallbacks = true; } </script> <link rel="canonical" href="https://independent.academia.edu/OlivierRosec" /> </head> <!--[if gte IE 9 ]> <body class='ie ie9 c-profiles/works a-summary logged_out'> <![endif]--> <!--[if !(IE) ]><!--> <body class='c-profiles/works a-summary logged_out'> <!--<![endif]--> <div id="fb-root"></div><script>window.fbAsyncInit = function() { FB.init({ appId: "2369844204", version: "v8.0", status: true, cookie: true, xfbml: true }); // Additional initialization code. if (window.InitFacebook) { // facebook.ts already loaded, set it up. window.InitFacebook(); } else { // Set a flag for facebook.ts to find when it loads. window.academiaAuthReadyFacebook = true; } };</script><script>window.fbAsyncLoad = function() { // Protection against double calling of this function if (window.FB) { return; } (function(d, s, id){ var js, fjs = d.getElementsByTagName(s)[0]; if (d.getElementById(id)) {return;} js = d.createElement(s); js.id = id; js.src = "//connect.facebook.net/en_US/sdk.js"; fjs.parentNode.insertBefore(js, fjs); }(document, 'script', 'facebook-jssdk')); } if (!window.defer_facebook) { // Autoload if not deferred window.fbAsyncLoad(); } else { // Defer loading by 5 seconds setTimeout(function() { window.fbAsyncLoad(); }, 5000); }</script> <div id="google-root"></div><script>window.loadGoogle = function() { if (window.InitGoogle) { // google.ts already loaded, set it up. window.InitGoogle("331998490334-rsn3chp12mbkiqhl6e7lu2q0mlbu0f1b"); } else { // Set a flag for google.ts to use when it loads. 
window.GoogleClientID = "331998490334-rsn3chp12mbkiqhl6e7lu2q0mlbu0f1b"; } };</script><script>window.googleAsyncLoad = function() { // Protection against double calling of this function (function(d) { var js; var id = 'google-jssdk'; var ref = d.getElementsByTagName('script')[0]; if (d.getElementById(id)) { return; } js = d.createElement('script'); js.id = id; js.async = true; js.onload = loadGoogle; js.src = "https://accounts.google.com/gsi/client" ref.parentNode.insertBefore(js, ref); }(document)); } if (!window.defer_google) { // Autoload if not deferred window.googleAsyncLoad(); } else { // Defer loading by 5 seconds setTimeout(function() { window.googleAsyncLoad(); }, 5000); }</script> <div id="tag-manager-body-root"> <!-- Google Tag Manager (noscript) --> <noscript><iframe src="https://www.googletagmanager.com/ns.html?id=GTM-5G9JF7Z" height="0" width="0" style="display:none;visibility:hidden"></iframe></noscript> <!-- End Google Tag Manager (noscript) --> <!-- Event listeners for analytics --> <script> window.addEventListener('load', function() { if (document.querySelector('input[name="commit"]')) { document.querySelector('input[name="commit"]').addEventListener('click', function() { gtag('event', 'click', { event_category: 'button', event_label: 'Log In' }) }) } }); </script> </div> <script>var _comscore = _comscore || []; _comscore.push({ c1: "2", c2: "26766707" }); (function() { var s = document.createElement("script"), el = document.getElementsByTagName("script")[0]; s.async = true; s.src = (document.location.protocol == "https:" ? 
"https://sb" : "http://b") + ".scorecardresearch.com/beacon.js"; el.parentNode.insertBefore(s, el); })();</script><img src="https://sb.scorecardresearch.com/p?c1=2&c2=26766707&cv=2.0&cj=1" style="position: absolute; visibility: hidden" /> <div id='react-modal'></div> <div class='DesignSystem'> <a class='u-showOnFocus' href='#site'> Skip to main content </a> </div> <div id="upgrade_ie_banner" style="display: none;"><p>Academia.edu no longer supports Internet Explorer.</p><p>To browse Academia.edu and the wider internet faster and more securely, please take a few seconds to <a href="https://www.academia.edu/upgrade-browser">upgrade your browser</a>.</p></div><script>// Show this banner for all versions of IE if (!!window.MSInputMethodContext || /(MSIE)/.test(navigator.userAgent)) { document.getElementById('upgrade_ie_banner').style.display = 'block'; }</script> <div class="DesignSystem bootstrap ShrinkableNav"><div class="navbar navbar-default main-header"><div class="container-wrapper" id="main-header-container"><div class="container"><div class="navbar-header"><div class="nav-left-wrapper u-mt0x"><div class="nav-logo"><a data-main-header-link-target="logo_home" href="https://www.academia.edu/"><img class="visible-xs-inline-block" style="height: 24px;" alt="Academia.edu" src="//a.academia-assets.com/images/academia-logo-redesign-2015-A.svg" width="24" height="24" /><img width="145.2" height="18" class="hidden-xs" style="height: 24px;" alt="Academia.edu" src="//a.academia-assets.com/images/academia-logo-redesign-2015.svg" /></a></div><div class="nav-search"><div class="SiteSearch-wrapper select2-no-default-pills"><form class="js-SiteSearch-form DesignSystem" action="https://www.academia.edu/search" accept-charset="UTF-8" method="get"><input name="utf8" type="hidden" value="✓" autocomplete="off" /><i class="SiteSearch-icon fa fa-search u-fw700 u-positionAbsolute u-tcGrayDark"></i><input class="js-SiteSearch-form-input SiteSearch-form-input form-control" 
data-main-header-click-target="search_input" name="q" placeholder="Search" type="text" value="" /></form></div></div></div><div class="nav-right-wrapper pull-right"><ul class="NavLinks js-main-nav list-unstyled"><li class="NavLinks-link"><a class="js-header-login-url Button Button--inverseGray Button--sm u-mb4x" id="nav_log_in" rel="nofollow" href="https://www.academia.edu/login">Log In</a></li><li class="NavLinks-link u-p0x"><a class="Button Button--inverseGray Button--sm u-mb4x" rel="nofollow" href="https://www.academia.edu/signup">Sign Up</a></li></ul><button class="hidden-lg hidden-md hidden-sm u-ml4x navbar-toggle collapsed" data-target=".js-mobile-header-links" data-toggle="collapse" type="button"><span class="icon-bar"></span><span class="icon-bar"></span><span class="icon-bar"></span></button></div></div><div class="collapse navbar-collapse js-mobile-header-links"><ul class="nav navbar-nav"><li class="u-borderColorGrayLight u-borderBottom1"><a rel="nofollow" href="https://www.academia.edu/login">Log In</a></li><li class="u-borderColorGrayLight u-borderBottom1"><a rel="nofollow" href="https://www.academia.edu/signup">Sign Up</a></li><li class="u-borderColorGrayLight u-borderBottom1 js-mobile-nav-expand-trigger"><a href="#">more <span class="caret"></span></a></li><li><ul class="js-mobile-nav-expand-section nav navbar-nav u-m0x collapse"><li class="u-borderColorGrayLight u-borderBottom1"><a rel="false" href="https://www.academia.edu/about">About</a></li><li class="u-borderColorGrayLight u-borderBottom1"><a rel="nofollow" href="https://www.academia.edu/press">Press</a></li><li class="u-borderColorGrayLight u-borderBottom1"><a rel="nofollow" href="https://medium.com/@academia">Blog</a></li><li class="u-borderColorGrayLight u-borderBottom1"><a rel="false" href="https://www.academia.edu/documents">Papers</a></li><li class="u-borderColorGrayLight u-borderBottom1"><a rel="nofollow" href="https://www.academia.edu/terms">Terms</a></li><li 
class="u-borderColorGrayLight u-borderBottom1"><a rel="nofollow" href="https://www.academia.edu/privacy">Privacy</a></li><li class="u-borderColorGrayLight u-borderBottom1"><a rel="nofollow" href="https://www.academia.edu/copyright">Copyright</a></li><li class="u-borderColorGrayLight u-borderBottom1"><a rel="nofollow" href="https://www.academia.edu/hiring"><i class="fa fa-briefcase"></i> We're Hiring!</a></li><li class="u-borderColorGrayLight u-borderBottom1"><a rel="nofollow" href="https://support.academia.edu/"><i class="fa fa-question-circle"></i> Help Center</a></li><li class="js-mobile-nav-collapse-trigger u-borderColorGrayLight u-borderBottom1 dropup" style="display:none"><a href="#">less <span class="caret"></span></a></li></ul></li></ul></div></div></div><script>(function(){ var $moreLink = $(".js-mobile-nav-expand-trigger"); var $lessLink = $(".js-mobile-nav-collapse-trigger"); var $section = $('.js-mobile-nav-expand-section'); $moreLink.click(function(ev){ ev.preventDefault(); $moreLink.hide(); $lessLink.show(); $section.collapse('show'); }); $lessLink.click(function(ev){ ev.preventDefault(); $moreLink.show(); $lessLink.hide(); $section.collapse('hide'); }); })() if ($a.is_logged_in() || false) { new Aedu.NavigationController({ el: '.js-main-nav', showHighlightedNotification: false }); } else { $(".js-header-login-url").attr("href", $a.loginUrlWithRedirect()); } Aedu.autocompleteSearch = new AutocompleteSearch({el: '.js-SiteSearch-form'});</script></div></div> <div id='site' class='fixed'> <div id="content" class="clearfix"> <script>document.addEventListener('DOMContentLoaded', function(){ var $dismissible = $(".dismissible_banner"); $dismissible.click(function(ev) { $dismissible.hide(); }); });</script> <script src="//a.academia-assets.com/assets/webpack_bundles/profile.wjs-bundle-9601d1cc3d68aa07c0a9901d03d3611aec04cc07d2a2039718ebef4ad4d148ca.js" defer="defer"></script><script>Aedu.rankings = { showPaperRankingsLink: false } $viewedUser = 
Aedu.User.set_viewed( {"id":35678791,"first_name":"Olivier","middle_initials":null,"last_name":"Rosec","page_name":"OlivierRosec","domain_name":"independent","created_at":"2015-10-05T05:47:18.949-07:00","display_name":"Olivier Rosec","url":"https://independent.academia.edu/OlivierRosec","photo":"/images/s65_no_pic.png","has_photo":false,"is_analytics_public":false,"interests":[{"id":53272,"name":"Musical Instruments","url":"https://www.academia.edu/Documents/in/Musical_Instruments"},{"id":374,"name":"Harmonic Analysis","url":"https://www.academia.edu/Documents/in/Harmonic_Analysis"},{"id":36835,"name":"Speech Processing","url":"https://www.academia.edu/Documents/in/Speech_Processing"},{"id":4148,"name":"Audio Signal Processing","url":"https://www.academia.edu/Documents/in/Audio_Signal_Processing"},{"id":42313,"name":"Emotional Speech","url":"https://www.academia.edu/Documents/in/Emotional_Speech"}]} ); if ($a.is_logged_in() && $viewedUser.is_current_user()) { $('body').addClass('profile-viewed-by-owner'); } $socialProfiles = []</script><div id="js-react-on-rails-context" style="display:none" data-rails-context="{"inMailer":false,"i18nLocale":"en","i18nDefaultLocale":"en","href":"https://independent.academia.edu/OlivierRosec","location":"/OlivierRosec","scheme":"https","host":"independent.academia.edu","port":null,"pathname":"/OlivierRosec","search":null,"httpAcceptLanguage":null,"serverSide":false}"></div> <div class="js-react-on-rails-component" style="display:none" data-component-name="ProfileCheckPaperUpdate" data-props="{}" data-trace="false" data-dom-id="ProfileCheckPaperUpdate-react-component-4be5937e-312f-403e-8647-108b04cf37b2"></div> <div id="ProfileCheckPaperUpdate-react-component-4be5937e-312f-403e-8647-108b04cf37b2"></div> <div class="DesignSystem"><div class="onsite-ping" id="onsite-ping"></div></div><div class="profile-user-info DesignSystem"><div class="social-profile-container"><div class="left-panel-container"><div 
class="user-info-component-wrapper"><div class="user-summary-cta-container"><div class="user-summary-container"><div class="social-profile-avatar-container"><img class="profile-avatar u-positionAbsolute" border="0" alt="" src="//a.academia-assets.com/images/s200_no_pic.png" /></div><div class="title-container"><h1 class="ds2-5-heading-sans-serif-sm">Olivier Rosec</h1><div class="affiliations-container fake-truncate js-profile-affiliations"></div></div></div><div class="sidebar-cta-container"><button class="ds2-5-button hidden profile-cta-button grow js-profile-follow-button" data-broccoli-component="user-info.follow-button" data-click-track="profile-user-info-follow-button" data-follow-user-fname="Olivier" data-follow-user-id="35678791" data-follow-user-source="profile_button" data-has-google="false"><span class="material-symbols-outlined" style="font-size: 20px" translate="no">add</span>Follow</button><button class="ds2-5-button hidden profile-cta-button grow js-profile-unfollow-button" data-broccoli-component="user-info.unfollow-button" data-click-track="profile-user-info-unfollow-button" data-unfollow-user-id="35678791"><span class="material-symbols-outlined" style="font-size: 20px" translate="no">done</span>Following</button></div></div><div class="user-stats-container"><a><div class="stat-container js-profile-followers"><p class="label">Followers</p><p class="data">17</p></div></a><a><div class="stat-container js-profile-followees" data-broccoli-component="user-info.followees-count" data-click-track="profile-expand-user-info-following"><p class="label">Following</p><p class="data">9</p></div></a><a><div class="stat-container js-profile-coauthors" data-broccoli-component="user-info.coauthors-count" data-click-track="profile-expand-user-info-coauthors"><p class="label">Co-authors</p><p class="data">9</p></div></a><span><div class="stat-container"><p class="label"><span class="js-profile-total-view-text">Public Views</span></p><p class="data"><span 
class="js-profile-view-count"></span></p></div></span></div><div class="ri-section"><div class="ri-section-header"><span>Interests</span></div><div class="ri-tags-container"><a data-click-track="profile-user-info-expand-research-interests" data-has-card-for-ri-list="35678791" href="https://www.academia.edu/Documents/in/Musical_Instruments"><div id="js-react-on-rails-context" style="display:none" data-rails-context="{"inMailer":false,"i18nLocale":"en","i18nDefaultLocale":"en","href":"https://independent.academia.edu/OlivierRosec","location":"/OlivierRosec","scheme":"https","host":"independent.academia.edu","port":null,"pathname":"/OlivierRosec","search":null,"httpAcceptLanguage":null,"serverSide":false}"></div> <div class="js-react-on-rails-component" style="display:none" data-component-name="Pill" data-props="{"color":"gray","children":["Musical Instruments"]}" data-trace="false" data-dom-id="Pill-react-component-c743c0a8-3882-4afa-aab8-25c03a5c5a61"></div> <div id="Pill-react-component-c743c0a8-3882-4afa-aab8-25c03a5c5a61"></div> </a><a data-click-track="profile-user-info-expand-research-interests" data-has-card-for-ri-list="35678791" href="https://www.academia.edu/Documents/in/Harmonic_Analysis"><div class="js-react-on-rails-component" style="display:none" data-component-name="Pill" data-props="{"color":"gray","children":["Harmonic Analysis"]}" data-trace="false" data-dom-id="Pill-react-component-a9435982-a03b-46cc-bf66-32bd246582b2"></div> <div id="Pill-react-component-a9435982-a03b-46cc-bf66-32bd246582b2"></div> </a><a data-click-track="profile-user-info-expand-research-interests" data-has-card-for-ri-list="35678791" href="https://www.academia.edu/Documents/in/Speech_Processing"><div class="js-react-on-rails-component" style="display:none" data-component-name="Pill" data-props="{"color":"gray","children":["Speech Processing"]}" data-trace="false" data-dom-id="Pill-react-component-2dd9e639-abe5-454b-aec4-2a990ea2d9c9"></div> <div 
id="Pill-react-component-2dd9e639-abe5-454b-aec4-2a990ea2d9c9"></div> </a><a data-click-track="profile-user-info-expand-research-interests" data-has-card-for-ri-list="35678791" href="https://www.academia.edu/Documents/in/Audio_Signal_Processing"><div class="js-react-on-rails-component" style="display:none" data-component-name="Pill" data-props="{"color":"gray","children":["Audio Signal Processing"]}" data-trace="false" data-dom-id="Pill-react-component-2eec7aa1-d7fc-477e-b5f7-aff41b5ea0ef"></div> <div id="Pill-react-component-2eec7aa1-d7fc-477e-b5f7-aff41b5ea0ef"></div> </a><a data-click-track="profile-user-info-expand-research-interests" data-has-card-for-ri-list="35678791" href="https://www.academia.edu/Documents/in/Emotional_Speech"><div class="js-react-on-rails-component" style="display:none" data-component-name="Pill" data-props="{"color":"gray","children":["Emotional Speech"]}" data-trace="false" data-dom-id="Pill-react-component-fe39f993-2d49-45f0-a525-b3e8a337e313"></div> <div id="Pill-react-component-fe39f993-2d49-45f0-a525-b3e8a337e313"></div> </a></div></div></div></div><div class="right-panel-container"><div class="user-content-wrapper"><div class="uploads-container" id="social-redesign-work-container"><div class="upload-header"><h2 class="ds2-5-heading-sans-serif-xs">Uploads</h2></div><div class="documents-container backbone-social-profile-documents" style="width: 100%;"><div class="u-taCenter"></div><div class="profile--tab_content_container js-tab-pane tab-pane active" id="all"><div class="profile--tab_heading_container js-section-heading" data-section="Papers" id="Papers"><h3 class="profile--tab_heading_container">Papers by Olivier Rosec</h3></div><div class="js-work-strip profile--work_container" data-work-id="26799237"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" rel="nofollow" 
href="https://www.academia.edu/26799237/Improved_Voice_Signal_Conversion_Method_and_System"><img alt="Research paper thumbnail of Improved Voice Signal Conversion Method and System" class="work-thumbnail" src="https://a.academia-assets.com/images/blank-paper.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" rel="nofollow" href="https://www.academia.edu/26799237/Improved_Voice_Signal_Conversion_Method_and_System">Improved Voice Signal Conversion Method and System</a></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="26799237"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span><span id="work-strip-rankings-button-container"></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="26799237"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 26799237; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=26799237]").text(description); $(".js-view-count[data-work-id=26799237]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 26799237; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = 
$(".js-work-strip[data-work-id='26799237']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span><span><script>$(function() { new Works.PaperRankView({ workId: 26799237, container: "", }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-f77ea15d77ce96025a6048a514272ad8becbad23c641fc2b3bd6e24ca6ff1932.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (false){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "-1" } } $('.js-work-strip[data-work-id=26799237]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":26799237,"title":"Improved Voice Signal Conversion Method and 
System","translated_title":"","metadata":{"publication_date":{"day":13,"month":12,"year":2006,"errors":{}}},"translated_abstract":null,"internal_url":"https://www.academia.edu/26799237/Improved_Voice_Signal_Conversion_Method_and_System","translated_internal_url":"","created_at":"2016-07-07T00:55:23.453-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":35678791,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[],"downloadable_attachments":[],"slug":"Improved_Voice_Signal_Conversion_Method_and_System","translated_slug":"","page_count":null,"language":"en","content_type":"Work","owner":{"id":35678791,"first_name":"Olivier","middle_initials":null,"last_name":"Rosec","page_name":"OlivierRosec","domain_name":"independent","created_at":"2015-10-05T05:47:18.949-07:00","display_name":"Olivier Rosec","url":"https://independent.academia.edu/OlivierRosec"},"attachments":[],"research_interests":[],"urls":[{"id":7294934,"url":"http://www.freepatentsonline.com/EP1730729.html"}]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") }); </script> <div class="js-work-strip profile--work_container" data-work-id="26799234"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" rel="nofollow" href="https://www.academia.edu/26799234/Method_and_device_for_modifying_an_audio_signal"><img alt="Research paper thumbnail of Method and device for modifying an audio signal" class="work-thumbnail" src="https://a.academia-assets.com/images/blank-paper.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" rel="nofollow" href="https://www.academia.edu/26799234/Method_and_device_for_modifying_an_audio_signal">Method 
and device for modifying an audio signal</a></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="26799234"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span><span id="work-strip-rankings-button-container"></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="26799234"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 26799234; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=26799234]").text(description); $(".js-view-count[data-work-id=26799234]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 26799234; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='26799234']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span><span><script>$(function() { new Works.PaperRankView({ workId: 26799234, container: "", }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 
})(["https://a.academia-assets.com/assets/wow_profile-f77ea15d77ce96025a6048a514272ad8becbad23c641fc2b3bd6e24ca6ff1932.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (false){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "-1" } } $('.js-work-strip[data-work-id=26799234]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":26799234,"title":"Method and device for modifying an audio signal","translated_title":"","metadata":{"publication_date":{"day":21,"month":2,"year":2012,"errors":{}}},"translated_abstract":null,"internal_url":"https://www.academia.edu/26799234/Method_and_device_for_modifying_an_audio_signal","translated_internal_url":"","created_at":"2016-07-07T00:55:15.738-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":35678791,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[],"downloadable_attachments":[],"slug":"Method_and_device_for_modifying_an_audio_signal","translated_slug":"","page_count":null,"language":"en","content_type":"Work","owner":{"id":35678791,"first_name":"Olivier","middle_initials":null,"last_name":"Rosec","page_name":"OlivierRosec","domain_name":"independent","created_at":"2015-10-05T05:47:18.949-07:00","display_name":"Olivier Rosec","url":"https://independent.academia.edu/OlivierRosec"},"attachments":[],"research_interests":[],"urls":[{"id":7294933,"url":"http://www.freepatentsonline.com/8121834.html"}]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") }); </script> <div class="js-work-strip profile--work_container" data-work-id="26479065"><div 
class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" rel="nofollow" href="https://www.academia.edu/26479065/Towards_flexible_speech_coding_for_speech_synthesis_an_LF_modulated_noise_vocoder"><img alt="Research paper thumbnail of Towards flexible speech coding for speech synthesis: an LF+ modulated noise vocoder" class="work-thumbnail" src="https://a.academia-assets.com/images/blank-paper.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" rel="nofollow" href="https://www.academia.edu/26479065/Towards_flexible_speech_coding_for_speech_synthesis_an_LF_modulated_noise_vocoder">Towards flexible speech coding for speech synthesis: an LF+ modulated noise vocoder</a></div><div class="wp-workCard_item wp-workCard--coauthors"><span>by </span><span><a class="" data-click-track="profile-work-strip-authors" href="https://independent.academia.edu/OlivierRosec">Olivier Rosec</a> and <a class="" data-click-track="profile-work-strip-authors" href="https://independent.academia.edu/YannisAgiomyrgiannakis">Yannis Agiomyrgiannakis</a></span></div><div class="wp-workCard_item"><span>Ninth Annual Conference of the …</span><span>, 2008</span></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">... Orange Labs, TECH-SSTP-VMI {yannis.agiomyrgiannakis, olivier.rosec}@orange-ftgroup.com ... t ...</span><a class="js-work-more-abstract" data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">... Orange Labs, TECH-SSTP-VMI {yannis.agiomyrgiannakis, olivier.rosec}@orange-ftgroup.com ... t c open phase ... [5] Y. 
Stylianou, Harmonic-plus-noise Models for speech, combined with statistical methods for speech and speaker modification, Ph.D. dissertation, Ecole Nationale. ...</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="26479065"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span><span id="work-strip-rankings-button-container"></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="26479065"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 26479065; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=26479065]").text(description); $(".js-view-count[data-work-id=26479065]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 26479065; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='26479065']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span><span><script>$(function() { new Works.PaperRankView({ workId: 26479065, container: "", }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 
})(["https://a.academia-assets.com/assets/wow_profile-f77ea15d77ce96025a6048a514272ad8becbad23c641fc2b3bd6e24ca6ff1932.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (false){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "-1" } } $('.js-work-strip[data-work-id=26479065]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":26479065,"title":"Towards flexible speech coding for speech synthesis: an LF+ modulated noise vocoder","translated_title":"","metadata":{"abstract":"... Orange Labs, TECH-SSTP-VMI {yannis.agiomyrgiannakis, olivier.rosec}@orange-ftgroup.com ... t c open phase ... [5] Y. Stylianou, Harmonic-plus-noise Models for speech, combined with statistical methods for speech and speaker modification, Ph.D. dissertation, Ecole Nationale. ...","publisher":"isca-speech.org","publication_date":{"day":null,"month":null,"year":2008,"errors":{}},"publication_name":"Ninth Annual Conference of the …"},"translated_abstract":"... Orange Labs, TECH-SSTP-VMI {yannis.agiomyrgiannakis, olivier.rosec}@orange-ftgroup.com ... t c open phase ... [5] Y. Stylianou, Harmonic-plus-noise Models for speech, combined with statistical methods for speech and speaker modification, Ph.D. dissertation, Ecole Nationale. 
...","internal_url":"https://www.academia.edu/26479065/Towards_flexible_speech_coding_for_speech_synthesis_an_LF_modulated_noise_vocoder","translated_internal_url":"","created_at":"2016-06-24T23:21:36.663-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":50438551,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[{"id":21726055,"work_id":26479065,"tagging_user_id":50438551,"tagged_user_id":35678791,"co_author_invite_id":null,"email":"o***c@voxygen.fr","display_order":0,"name":"Olivier Rosec","title":"Towards flexible speech coding for speech synthesis: an LF+ modulated noise vocoder"}],"downloadable_attachments":[],"slug":"Towards_flexible_speech_coding_for_speech_synthesis_an_LF_modulated_noise_vocoder","translated_slug":"","page_count":null,"language":"en","content_type":"Work","owner":{"id":50438551,"first_name":"Yannis","middle_initials":null,"last_name":"Agiomyrgiannakis","page_name":"YannisAgiomyrgiannakis","domain_name":"independent","created_at":"2016-06-24T23:21:25.598-07:00","display_name":"Yannis Agiomyrgiannakis","url":"https://independent.academia.edu/YannisAgiomyrgiannakis"},"attachments":[],"research_interests":[{"id":2342,"name":"Speech Synthesis","url":"https://www.academia.edu/Documents/in/Speech_Synthesis"},{"id":299796,"name":"Speech Coding","url":"https://www.academia.edu/Documents/in/Speech_Coding"}],"urls":[]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") }); </script> <div class="js-work-strip profile--work_container" data-work-id="26799227"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/26799227/Segmentation_Automatique_De_Corpus_De_Parole_Continue_D%C3%A9di%C3%A9s_%C3%80_La_Synth%C3%A8se_Vocale"><img alt="Research paper thumbnail of Segmentation Automatique De Corpus 
De Parole Continue Dédiés À La Synthèse Vocale" class="work-thumbnail" src="https://attachments.academia-assets.com/47073271/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/26799227/Segmentation_Automatique_De_Corpus_De_Parole_Continue_D%C3%A9di%C3%A9s_%C3%80_La_Synth%C3%A8se_Vocale">Segmentation Automatique De Corpus De Parole Continue Dédiés À La Synthèse Vocale</a></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="a28cbd55e0a2a9c6d801527714f11b0c" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":47073271,"asset_id":26799227,"asset_type":"Work","button_location":"profile"}" href="https://www.academia.edu/attachments/47073271/download_file?st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="26799227"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span><span id="work-strip-rankings-button-container"></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="26799227"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 26799227; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=26799227]").text(description); 
$(".js-view-count[data-work-id=26799227]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 26799227; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='26799227']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span><span><script>$(function() { new Works.PaperRankView({ workId: 26799227, container: "", }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-f77ea15d77ce96025a6048a514272ad8becbad23c641fc2b3bd6e24ca6ff1932.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (true){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "a28cbd55e0a2a9c6d801527714f11b0c" } } $('.js-work-strip[data-work-id=26799227]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":26799227,"title":"Segmentation Automatique De Corpus De Parole Continue Dédiés À La Synthèse 
Vocale","translated_title":"","metadata":{},"translated_abstract":null,"internal_url":"https://www.academia.edu/26799227/Segmentation_Automatique_De_Corpus_De_Parole_Continue_D%C3%A9di%C3%A9s_%C3%80_La_Synth%C3%A8se_Vocale","translated_internal_url":"","created_at":"2016-07-07T00:54:40.488-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":35678791,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[],"downloadable_attachments":[{"id":47073271,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/47073271/thumbnails/1.jpg","file_name":"SEGMENTATION_AUTOMATIQUE_DE_CORPUS_DE_PA20160707-30874-1wwx6tl.pdf","download_url":"https://www.academia.edu/attachments/47073271/download_file?st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&","bulk_download_file_name":"Segmentation_Automatique_De_Corpus_De_Pa.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/47073271/SEGMENTATION_AUTOMATIQUE_DE_CORPUS_DE_PA20160707-30874-1wwx6tl-libre.pdf?1467878533=\u0026response-content-disposition=attachment%3B+filename%3DSegmentation_Automatique_De_Corpus_De_Pa.pdf\u0026Expires=1732441119\u0026Signature=Sel22yTgxF7ChrDf-PnVWkgfocGSx-cg8Uk4b5w7tqG~G3OwETBJqBOMCmA2Un~LylfghN8g9qjRTX9bYc-Vom6qgTEhPYPqOxV1lqGlnvJcn1UwQgjqnc0ccoR656lgUVUPdjzG2fundfePp3FimjLxL9Q1pFtVRAc4mJyhFoOX1H4CXmKymNsgDE5SaKLG4Xz1so2umAljgexCD9DsJX-Lhcbo7-hiKjFEeExQzzm~LEU4x~duCXg77SZ5jSm1Pf3Mvp~u02BzO63l5VMm5d76HyWxVoqRH0tl1FqAViqkx8w3F-6rEKav4sO0ZKq3VcAuKhqmqIiYdCB1alGFoA__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"slug":"Segmentation_Automatique_De_Corpus_De_Parole_Continue_Dédiés_À_La_Synthèse_Vocale","translated_slug":"","page_count":19,"language":"fr","content_type":"Work","owner":{"id":35678791,"first_name":"Olivier","middle_initials":null,"last_name":"Rosec","page_name":"OlivierRosec","domain_name":"independent","created_at":"2015-10-05T05:47:18.949-07:00","display_name":"Olivier 
Rosec","url":"https://independent.academia.edu/OlivierRosec"},"attachments":[{"id":47073271,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/47073271/thumbnails/1.jpg","file_name":"SEGMENTATION_AUTOMATIQUE_DE_CORPUS_DE_PA20160707-30874-1wwx6tl.pdf","download_url":"https://www.academia.edu/attachments/47073271/download_file?st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&","bulk_download_file_name":"Segmentation_Automatique_De_Corpus_De_Pa.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/47073271/SEGMENTATION_AUTOMATIQUE_DE_CORPUS_DE_PA20160707-30874-1wwx6tl-libre.pdf?1467878533=\u0026response-content-disposition=attachment%3B+filename%3DSegmentation_Automatique_De_Corpus_De_Pa.pdf\u0026Expires=1732441119\u0026Signature=Sel22yTgxF7ChrDf-PnVWkgfocGSx-cg8Uk4b5w7tqG~G3OwETBJqBOMCmA2Un~LylfghN8g9qjRTX9bYc-Vom6qgTEhPYPqOxV1lqGlnvJcn1UwQgjqnc0ccoR656lgUVUPdjzG2fundfePp3FimjLxL9Q1pFtVRAc4mJyhFoOX1H4CXmKymNsgDE5SaKLG4Xz1so2umAljgexCD9DsJX-Lhcbo7-hiKjFEeExQzzm~LEU4x~duCXg77SZ5jSm1Pf3Mvp~u02BzO63l5VMm5d76HyWxVoqRH0tl1FqAViqkx8w3F-6rEKav4sO0ZKq3VcAuKhqmqIiYdCB1alGFoA__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"research_interests":[],"urls":[]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") }); </script> <div class="js-work-strip profile--work_container" data-work-id="26799225"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/26799225/Adapting_prosodic_chunking_algorithm_and_synthesis_system_to_specific_style_the_case_of_dictation"><img alt="Research paper thumbnail of Adapting prosodic chunking algorithm and synthesis system to specific style: the case of dictation" class="work-thumbnail" src="https://a.academia-assets.com/images/blank-paper.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div 
class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/26799225/Adapting_prosodic_chunking_algorithm_and_synthesis_system_to_specific_style_the_case_of_dictation">Adapting prosodic chunking algorithm and synthesis system to specific style: the case of dictation</a></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">ABSTRACT In this paper, we present an approach that allows a TTS- system to dictate texts to prim...</span><a class="js-work-more-abstract" data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">ABSTRACT In this paper, we present an approach that allows a TTS- system to dictate texts to primary school pupils, while being in conformity with the prosodic features of this speaking style. The approach relies on the elaboration of a preprocessing prosodic module that avoids developing a specific system for a so limited task. The proposal is based on two distinct elements: (i) the results of a preliminary evaluation that allowed getting feedback from potential users; (ii) a corpus study of 10 dictations annotated or uttered by 13 teachers or speech therapists (10 and 3 respectively). The preliminary evaluation focused on three points: the accuracy of the segmentation procedure, the size of the automatically calculated chunks, and the intelligibility of the synthesized voice. It showed that the chunks were judged too long, and the speaking rate too fast. We thus decided to work on these two issues while analyzing the collected data, and confronting the obtained realizations with the outcome of the speech synthesis system and the chunking algorithm. 
The results of the analysis lead to propose a module that provides for this speaking style an enriched text that can be treated by the synthesizer to constrain the unit selection and the prosodic realization.</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="26799225"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span><span id="work-strip-rankings-button-container"></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="26799225"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 26799225; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=26799225]").text(description); $(".js-view-count[data-work-id=26799225]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 26799225; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='26799225']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span><span><script>$(function() { new Works.PaperRankView({ workId: 26799225, container: "", }); });</script></span></div><div 
id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-f77ea15d77ce96025a6048a514272ad8becbad23c641fc2b3bd6e24ca6ff1932.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (false){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "-1" } } $('.js-work-strip[data-work-id=26799225]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":26799225,"title":"Adapting prosodic chunking algorithm and synthesis system to specific style: the case of dictation","translated_title":"","metadata":{"abstract":"ABSTRACT In this paper, we present an approach that allows a TTS- system to dictate texts to primary school pupils, while being in conformity with the prosodic features of this speaking style. The approach relies on the elaboration of a preprocessing prosodic module that avoids developing a specific system for a so limited task. The proposal is based on two distinct elements: (i) the results of a preliminary evaluation that allowed getting feedback from potential users; (ii) a corpus study of 10 dictations annotated or uttered by 13 teachers or speech therapists (10 and 3 respectively). The preliminary evaluation focused on three points: the accuracy of the segmentation procedure, the size of the automatically calculated chunks, and the intelligibility of the synthesized voice. It showed that the chunks were judged too long, and the speaking rate too fast. We thus decided to work on these two issues while analyzing the collected data, and confronting the obtained realizations with the outcome of the speech synthesis system and the chunking algorithm. 
The results of the analysis lead to propose a module that provides for this speaking style an enriched text that can be treated by the synthesizer to constrain the unit selection and the prosodic realization."},"translated_abstract":"ABSTRACT In this paper, we present an approach that allows a TTS- system to dictate texts to primary school pupils, while being in conformity with the prosodic features of this speaking style. The approach relies on the elaboration of a preprocessing prosodic module that avoids developing a specific system for a so limited task. The proposal is based on two distinct elements: (i) the results of a preliminary evaluation that allowed getting feedback from potential users; (ii) a corpus study of 10 dictations annotated or uttered by 13 teachers or speech therapists (10 and 3 respectively). The preliminary evaluation focused on three points: the accuracy of the segmentation procedure, the size of the automatically calculated chunks, and the intelligibility of the synthesized voice. It showed that the chunks were judged too long, and the speaking rate too fast. We thus decided to work on these two issues while analyzing the collected data, and confronting the obtained realizations with the outcome of the speech synthesis system and the chunking algorithm. 
The results of the analysis lead to propose a module that provides for this speaking style an enriched text that can be treated by the synthesizer to constrain the unit selection and the prosodic realization.","internal_url":"https://www.academia.edu/26799225/Adapting_prosodic_chunking_algorithm_and_synthesis_system_to_specific_style_the_case_of_dictation","translated_internal_url":"","created_at":"2016-07-07T00:54:32.108-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":35678791,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[],"downloadable_attachments":[],"slug":"Adapting_prosodic_chunking_algorithm_and_synthesis_system_to_specific_style_the_case_of_dictation","translated_slug":"","page_count":null,"language":"en","content_type":"Work","owner":{"id":35678791,"first_name":"Olivier","middle_initials":null,"last_name":"Rosec","page_name":"OlivierRosec","domain_name":"independent","created_at":"2015-10-05T05:47:18.949-07:00","display_name":"Olivier Rosec","url":"https://independent.academia.edu/OlivierRosec"},"attachments":[],"research_interests":[],"urls":[]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") }); </script> <div class="js-work-strip profile--work_container" data-work-id="26799224"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" rel="nofollow" href="https://www.academia.edu/26799224/Algorithme_de_d%C3%A9coupage_en_groupes_prosodiques_pour_la_dict%C3%A9e_par_lusage_de_synth%C3%A8se_vocale"><img alt="Research paper thumbnail of Algorithme de découpage en groupes prosodiques pour la dictée par l'usage de synthèse vocale" class="work-thumbnail" src="https://a.academia-assets.com/images/blank-paper.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a 
class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" rel="nofollow" href="https://www.academia.edu/26799224/Algorithme_de_d%C3%A9coupage_en_groupes_prosodiques_pour_la_dict%C3%A9e_par_lusage_de_synth%C3%A8se_vocale">Algorithme de découpage en groupes prosodiques pour la dictée par l'usage de synthèse vocale</a></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="26799224"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span><span id="work-strip-rankings-button-container"></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="26799224"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 26799224; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=26799224]").text(description); $(".js-view-count[data-work-id=26799224]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 26799224; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='26799224']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span><span><script>$(function() { new 
Works.PaperRankView({ workId: 26799224, container: "", }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-f77ea15d77ce96025a6048a514272ad8becbad23c641fc2b3bd6e24ca6ff1932.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (false){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "-1" } } $('.js-work-strip[data-work-id=26799224]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":26799224,"title":"Algorithme de découpage en groupes prosodiques pour la dictée par l'usage de synthèse vocale","translated_title":"","metadata":{},"translated_abstract":null,"internal_url":"https://www.academia.edu/26799224/Algorithme_de_d%C3%A9coupage_en_groupes_prosodiques_pour_la_dict%C3%A9e_par_lusage_de_synth%C3%A8se_vocale","translated_internal_url":"","created_at":"2016-07-07T00:54:31.978-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":35678791,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[],"downloadable_attachments":[],"slug":"Algorithme_de_découpage_en_groupes_prosodiques_pour_la_dictée_par_lusage_de_synthèse_vocale","translated_slug":"","page_count":null,"language":"fr","content_type":"Work","owner":{"id":35678791,"first_name":"Olivier","middle_initials":null,"last_name":"Rosec","page_name":"OlivierRosec","domain_name":"independent","created_at":"2015-10-05T05:47:18.949-07:00","display_name":"Olivier Rosec","url":"https://independent.academia.edu/OlivierRosec"},"attachments":[],"research_interests":[],"urls":[]}, dispatcherData: dispatcherData }); 
$(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") }); </script> <div class="js-work-strip profile--work_container" data-work-id="26799223"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" rel="nofollow" href="https://www.academia.edu/26799223/Concatenation_cost_calculation_and_optimisation_for_unit_selection_in_TTS"><img alt="Research paper thumbnail of Concatenation cost calculation and optimisation for unit selection in TTS" class="work-thumbnail" src="https://a.academia-assets.com/images/blank-paper.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" rel="nofollow" href="https://www.academia.edu/26799223/Concatenation_cost_calculation_and_optimisation_for_unit_selection_in_TTS">Concatenation cost calculation and optimisation for unit selection in TTS</a></div><div class="wp-workCard_item"><span>Proceedings of 2002 IEEE Workshop on Speech Synthesis 2002 WSS-02</span><span>, 2002</span></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">... Christophe Blouin *, Olivier Rosec *, Paul C. Bagshaw * C? Christophe d&#x27;Alessandro ** *F...</span><a class="js-work-more-abstract" data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">... Christophe Blouin *, Olivier Rosec *, Paul C. Bagshaw * C? Christophe d&#x27;Alessandro ** *France TClCcom R&amp;D FTRD/DIH, **LIMSI, CNRS, Orsay E-mail: (christophe.blouinlolivier.rosecIpaul. bagshaw)@rd.francetelecom.com, <a href="mailto:cda@limsi.fr" rel="nofollow">cda@limsi.fr</a> ... [Bree&#x27;98] Breen A. 
and Jackson P ...</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="26799223"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span><span id="work-strip-rankings-button-container"></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="26799223"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 26799223; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=26799223]").text(description); $(".js-view-count[data-work-id=26799223]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 26799223; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='26799223']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span><span><script>$(function() { new Works.PaperRankView({ workId: 26799223, container: "", }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 
})(["https://a.academia-assets.com/assets/wow_profile-f77ea15d77ce96025a6048a514272ad8becbad23c641fc2b3bd6e24ca6ff1932.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (false){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "-1" } } $('.js-work-strip[data-work-id=26799223]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":26799223,"title":"Concatenation cost calculation and optimisation for unit selection in TTS","translated_title":"","metadata":{"abstract":"... Christophe Blouin *, Olivier Rosec *, Paul C. Bagshaw * C? Christophe d\u0026#x27;Alessandro ** *France TClCcom R\u0026amp;D FTRD/DIH, **LIMSI, CNRS, Orsay E-mail: (christophe.blouinlolivier.rosecIpaul. bagshaw)@rd.francetelecom.com, cda@limsi.fr ... [Bree\u0026#x27;98] Breen A. and Jackson P ...","publication_date":{"day":null,"month":null,"year":2002,"errors":{}},"publication_name":"Proceedings of 2002 IEEE Workshop on Speech Synthesis 2002 WSS-02"},"translated_abstract":"... Christophe Blouin *, Olivier Rosec *, Paul C. Bagshaw * C? Christophe d\u0026#x27;Alessandro ** *France TClCcom R\u0026amp;D FTRD/DIH, **LIMSI, CNRS, Orsay E-mail: (christophe.blouinlolivier.rosecIpaul. bagshaw)@rd.francetelecom.com, cda@limsi.fr ... [Bree\u0026#x27;98] Breen A. 
and Jackson P ...","internal_url":"https://www.academia.edu/26799223/Concatenation_cost_calculation_and_optimisation_for_unit_selection_in_TTS","translated_internal_url":"","created_at":"2016-07-07T00:54:31.847-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":35678791,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[],"downloadable_attachments":[],"slug":"Concatenation_cost_calculation_and_optimisation_for_unit_selection_in_TTS","translated_slug":"","page_count":null,"language":"fr","content_type":"Work","owner":{"id":35678791,"first_name":"Olivier","middle_initials":null,"last_name":"Rosec","page_name":"OlivierRosec","domain_name":"independent","created_at":"2015-10-05T05:47:18.949-07:00","display_name":"Olivier Rosec","url":"https://independent.academia.edu/OlivierRosec"},"attachments":[],"research_interests":[{"id":2342,"name":"Speech Synthesis","url":"https://www.academia.edu/Documents/in/Speech_Synthesis"},{"id":40276,"name":"Proceedings","url":"https://www.academia.edu/Documents/in/Proceedings"},{"id":199316,"name":"Multiple Linear Regression","url":"https://www.academia.edu/Documents/in/Multiple_Linear_Regression"},{"id":999121,"name":"Unit Selection","url":"https://www.academia.edu/Documents/in/Unit_Selection"},{"id":1766708,"name":"WSS","url":"https://www.academia.edu/Documents/in/WSS"}],"urls":[]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") }); </script> <div class="js-work-strip profile--work_container" data-work-id="26799221"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/26799221/A_voice_conversion_method_based_on_joint_pitch_and_spectral_envelope_transformation"><img alt="Research paper thumbnail of A voice conversion method based on joint pitch and spectral envelope 
transformation" class="work-thumbnail" src="https://attachments.academia-assets.com/47073266/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/26799221/A_voice_conversion_method_based_on_joint_pitch_and_spectral_envelope_transformation">A voice conversion method based on joint pitch and spectral envelope transformation</a></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="14feb752e3c20c9d8dd92afe2f5146a6" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":47073266,"asset_id":26799221,"asset_type":"Work","button_location":"profile"}" href="https://www.academia.edu/attachments/47073266/download_file?st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="26799221"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span><span id="work-strip-rankings-button-container"></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="26799221"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 26799221; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=26799221]").text(description); $(".js-view-count[data-work-id=26799221]").attr('title', 
description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 26799221; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='26799221']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span><span><script>$(function() { new Works.PaperRankView({ workId: 26799221, container: "", }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-f77ea15d77ce96025a6048a514272ad8becbad23c641fc2b3bd6e24ca6ff1932.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (true){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "14feb752e3c20c9d8dd92afe2f5146a6" } } $('.js-work-strip[data-work-id=26799221]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":26799221,"title":"A voice conversion method based on joint pitch and spectral envelope transformation","translated_title":"","metadata":{"grobid_abstract":"Most of the research in Voice Conversion (VC) is devoted to spectral transformation while the conversion of prosodic features is essentially obtained through a simple linear transformation of pitch. These separate transformations lead to an unsatisfactory speech conversion quality, especially when the speaking styles of the source and target speakers are different. 
In this paper, we propose a method capable of jointly converting pitch and spectral envelope information. The parameters to be transformed are obtained by combining scaled pitch values with the spectral envelope parameters for the voiced frames and only spectral envelope parameters for the unvoiced ones. These parameters are clustered using a Gaussian Mixture Model (GMM). Then the transformation functions are determined using a conditional expectation estimator. Tests carried out show that, this process leads to a satisfactory pitch transformation. Moreover, it makes the spectral envelope transformation more robust.","publication_date":{"day":null,"month":null,"year":2004,"errors":{}},"grobid_abstract_attachment_id":47073266},"translated_abstract":null,"internal_url":"https://www.academia.edu/26799221/A_voice_conversion_method_based_on_joint_pitch_and_spectral_envelope_transformation","translated_internal_url":"","created_at":"2016-07-07T00:54:23.135-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":35678791,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[],"downloadable_attachments":[{"id":47073266,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/47073266/thumbnails/1.jpg","file_name":"A_voice_conversion_method_based_on_joint20160707-31546-1amwrvh.pdf","download_url":"https://www.academia.edu/attachments/47073266/download_file?st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&","bulk_download_file_name":"A_voice_conversion_method_based_on_joint.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/47073266/A_voice_conversion_method_based_on_joint20160707-31546-1amwrvh-libre.pdf?1467878532=\u0026response-content-disposition=attachment%3B+filename%3DA_voice_conversion_method_based_on_joint.pdf\u0026Expires=1732441119\u0026Signature=f1cZfb4pxr4AkHhykLXQWvrcWNgxXCMSKBal3jHPZfqYUZ1yZ1Fk8~Rppn5wOmv02ICIbGTru4PiAZFzTS7d9xVjTXix9MgQFupnJqHn5jkpCKF-4tvF-hrxu59LXWai71GMIp
5HeZltMmjAmj2Ezs~mwDnblXuygOouUvV5OYaseTitqAB6puWX5uvJ8nllO8l0da~Zdb1XWQ6elvn0AiGuxFKr9gr20LcvdtaqQudM5nfuLau11tiA9l3wu0QTzf5E0IhuM4u4tJe306FC7tEkuqIHFuhNViVBtXyJUs-Tqt0kx~ygronftcn8lxV6G2rVohaCvfa3SLD09RMxxw__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"slug":"A_voice_conversion_method_based_on_joint_pitch_and_spectral_envelope_transformation","translated_slug":"","page_count":4,"language":"en","content_type":"Work","owner":{"id":35678791,"first_name":"Olivier","middle_initials":null,"last_name":"Rosec","page_name":"OlivierRosec","domain_name":"independent","created_at":"2015-10-05T05:47:18.949-07:00","display_name":"Olivier Rosec","url":"https://independent.academia.edu/OlivierRosec"},"attachments":[{"id":47073266,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/47073266/thumbnails/1.jpg","file_name":"A_voice_conversion_method_based_on_joint20160707-31546-1amwrvh.pdf","download_url":"https://www.academia.edu/attachments/47073266/download_file?st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&","bulk_download_file_name":"A_voice_conversion_method_based_on_joint.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/47073266/A_voice_conversion_method_based_on_joint20160707-31546-1amwrvh-libre.pdf?1467878532=\u0026response-content-disposition=attachment%3B+filename%3DA_voice_conversion_method_based_on_joint.pdf\u0026Expires=1732441119\u0026Signature=f1cZfb4pxr4AkHhykLXQWvrcWNgxXCMSKBal3jHPZfqYUZ1yZ1Fk8~Rppn5wOmv02ICIbGTru4PiAZFzTS7d9xVjTXix9MgQFupnJqHn5jkpCKF-4tvF-hrxu59LXWai71GMIp5HeZltMmjAmj2Ezs~mwDnblXuygOouUvV5OYaseTitqAB6puWX5uvJ8nllO8l0da~Zdb1XWQ6elvn0AiGuxFKr9gr20LcvdtaqQudM5nfuLau11tiA9l3wu0QTzf5E0IhuM4u4tJe306FC7tEkuqIHFuhNViVBtXyJUs-Tqt0kx~ygronftcn8lxV6G2rVohaCvfa3SLD09RMxxw__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"research_interests":[{"id":64590,"name":"Voice Conversion","url":"https://www.academia.edu/Documents/in/Voice_Conversion"},{"id":327120,"name":"Gaussian Mixture 
Model","url":"https://www.academia.edu/Documents/in/Gaussian_Mixture_Model"},{"id":521553,"name":"Conditional Expectation","url":"https://www.academia.edu/Documents/in/Conditional_Expectation"},{"id":2277205,"name":"Linear Transformation","url":"https://www.academia.edu/Documents/in/Linear_Transformation"}],"urls":[{"id":7294930,"url":"http://eurecom.fr/~ennajjar/pub/en-najjary-icslp-2004.pdf"}]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") }); </script> <div class="js-work-strip profile--work_container" data-work-id="26799220"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/26799220/On_the_robustness_of_the_Quasi_Harmonic_model_of_speech"><img alt="Research paper thumbnail of On the robustness of the Quasi-Harmonic model of speech" class="work-thumbnail" src="https://a.academia-assets.com/images/blank-paper.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/26799220/On_the_robustness_of_the_Quasi_Harmonic_model_of_speech">On the robustness of the Quasi-Harmonic model of speech</a></div><div class="wp-workCard_item"><span>Acoustics Speech and Signal Processing 1988 Icassp 88 1988 International Conference on</span><span>, Mar 14, 2010</span></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">... Yannis Pantazis 1 , Olivier Rosec 2 and Yannis Stylianou 1 ... 
FORTH, and Multimedia Informat...</span><a class="js-work-more-abstract" data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">... Yannis Pantazis 1 , Olivier Rosec 2 and Yannis Stylianou 1 ... FORTH, and Multimedia Informatics Lab, CSD, UoC, Greece 2 Orange Labs TECH/SSTP/VMI, Lannion, France email: <a href="mailto:pantazis@csd.uoc.gr" rel="nofollow">pantazis@csd.uoc.gr</a>, <a href="mailto:olivier.rosec@orange-ftgroup.com" rel="nofollow">olivier.rosec@orange-ftgroup.com</a> and <a href="mailto:yannis@csd.uoc.gr" rel="nofollow">yannis@csd.uoc.gr</a> ... η/2π (Hz) (c) ...</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="26799220"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span><span id="work-strip-rankings-button-container"></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="26799220"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 26799220; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=26799220]").text(description); $(".js-view-count[data-work-id=26799220]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 
26799220; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='26799220']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span><span><script>$(function() { new Works.PaperRankView({ workId: 26799220, container: "", }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-f77ea15d77ce96025a6048a514272ad8becbad23c641fc2b3bd6e24ca6ff1932.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (false){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "-1" } } $('.js-work-strip[data-work-id=26799220]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":26799220,"title":"On the robustness of the Quasi-Harmonic model of speech","translated_title":"","metadata":{"abstract":"... Yannis Pantazis 1 , Olivier Rosec 2 and Yannis Stylianou 1 ... FORTH, and Multimedia Informatics Lab, CSD, UoC, Greece 2 Orange Labs TECH/SSTP/VMI, Lannion, France email: pantazis@csd.uoc.gr, olivier.rosec@orange-ftgroup.com and yannis@csd.uoc.gr ... η/2π (Hz) (c) ...","publication_date":{"day":14,"month":3,"year":2010,"errors":{}},"publication_name":"Acoustics Speech and Signal Processing 1988 Icassp 88 1988 International Conference on"},"translated_abstract":"... Yannis Pantazis 1 , Olivier Rosec 2 and Yannis Stylianou 1 ... 
FORTH, and Multimedia Informatics Lab, CSD, UoC, Greece 2 Orange Labs TECH/SSTP/VMI, Lannion, France email: pantazis@csd.uoc.gr, olivier.rosec@orange-ftgroup.com and yannis@csd.uoc.gr ... η/2π (Hz) (c) ...","internal_url":"https://www.academia.edu/26799220/On_the_robustness_of_the_Quasi_Harmonic_model_of_speech","translated_internal_url":"","created_at":"2016-07-07T00:54:22.920-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":35678791,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[],"downloadable_attachments":[],"slug":"On_the_robustness_of_the_Quasi_Harmonic_model_of_speech","translated_slug":"","page_count":null,"language":"en","content_type":"Work","owner":{"id":35678791,"first_name":"Olivier","middle_initials":null,"last_name":"Rosec","page_name":"OlivierRosec","domain_name":"independent","created_at":"2015-10-05T05:47:18.949-07:00","display_name":"Olivier Rosec","url":"https://independent.academia.edu/OlivierRosec"},"attachments":[],"research_interests":[{"id":1440,"name":"Visualization","url":"https://www.academia.edu/Documents/in/Visualization"},{"id":2342,"name":"Speech Synthesis","url":"https://www.academia.edu/Documents/in/Speech_Synthesis"},{"id":8056,"name":"Speech Acoustics","url":"https://www.academia.edu/Documents/in/Speech_Acoustics"},{"id":36835,"name":"Speech Processing","url":"https://www.academia.edu/Documents/in/Speech_Processing"},{"id":40738,"name":"Signal Analysis","url":"https://www.academia.edu/Documents/in/Signal_Analysis"},{"id":82241,"name":"Cramer Rao Lower Bound","url":"https://www.academia.edu/Documents/in/Cramer_Rao_Lower_Bound"},{"id":139657,"name":"Frame Analysis","url":"https://www.academia.edu/Documents/in/Frame_Analysis"},{"id":279495,"name":"Robustness","url":"https://www.academia.edu/Documents/in/Robustness"},{"id":299796,"name":"Speech Coding","url":"https://www.academia.edu/Documents/in/Speech_Coding"},{"id":368258,"name":"Speech 
analysis","url":"https://www.academia.edu/Documents/in/Speech_analysis"},{"id":375860,"name":"Frequency Estimation","url":"https://www.academia.edu/Documents/in/Frequency_Estimation"},{"id":582384,"name":"Gaussian noise","url":"https://www.academia.edu/Documents/in/Gaussian_noise"}],"urls":[{"id":7294929,"url":"http://dx.doi.org/10.1109/icassp.2010.5495700"}]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") }); </script> <div class="js-work-strip profile--work_container" data-work-id="26799219"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/26799219/Mod%C3%A8les_GMM_et_algorithme_de_brandt_pour_la_correction_de_la_segmentation_de_la_parole_par_HMM"><img alt="Research paper thumbnail of Modèles GMM et algorithme de brandt pour la correction de la segmentation de la parole par HMM" class="work-thumbnail" src="https://attachments.academia-assets.com/47073268/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/26799219/Mod%C3%A8les_GMM_et_algorithme_de_brandt_pour_la_correction_de_la_segmentation_de_la_parole_par_HMM">Modèles GMM et algorithme de brandt pour la correction de la segmentation de la parole par HMM</a></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="d5694fa71543928f2cd0166fe65c051d" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":47073268,"asset_id":26799219,"asset_type":"Work","button_location":"profile"}" 
href="https://www.academia.edu/attachments/47073268/download_file?st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="26799219"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span><span id="work-strip-rankings-button-container"></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="26799219"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 26799219; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=26799219]").text(description); $(".js-view-count[data-work-id=26799219]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 26799219; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='26799219']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span><span><script>$(function() { new Works.PaperRankView({ workId: 26799219, container: "", }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 
})(["https://a.academia-assets.com/assets/wow_profile-f77ea15d77ce96025a6048a514272ad8becbad23c641fc2b3bd6e24ca6ff1932.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (true){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "d5694fa71543928f2cd0166fe65c051d" } } $('.js-work-strip[data-work-id=26799219]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":26799219,"title":"Modèles GMM et algorithme de brandt pour la correction de la segmentation de la parole par HMM","translated_title":"","metadata":{"grobid_abstract":"On compare les performances de deux algorithmes de segmentation automatique. Le premier, nommé \"HMM amélioré\", affine la segmentation produite par les modèles de Markov cachés (HMM). Le deuxième est l'algorithme de Brandt qui vise, quantà lui,à détecter les ruptures de stationnarité. Le premier algorithme requiert la connaissance a priori de la phonétisation, le second non.Étant donné que l'algorithme de Brandt commet des insertions et des omissions, ce qui n'est pas le cas du HMM amélioré, on introduit une généralisation du taux de segmentation correcte (TSC) afin de comparer ces deux algorithmes. 
Les mesures expérimentales des TSCs permettent d'évaluer une limite supérieure des performances de l'algorithme de Brandt et suggèrent de combiner ces deux méthodes avec d'autres algorithmes adaptésà la séparation des classes acoustico-phonétiques.","publication_date":{"day":null,"month":null,"year":2005,"errors":{}},"grobid_abstract_attachment_id":47073268},"translated_abstract":null,"internal_url":"https://www.academia.edu/26799219/Mod%C3%A8les_GMM_et_algorithme_de_brandt_pour_la_correction_de_la_segmentation_de_la_parole_par_HMM","translated_internal_url":"","created_at":"2016-07-07T00:54:22.765-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":35678791,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[],"downloadable_attachments":[{"id":47073268,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/47073268/thumbnails/1.jpg","file_name":"Modles_GMM_et_algorithme_de_brandt_pour_20160707-9766-duztni.pdf","download_url":"https://www.academia.edu/attachments/47073268/download_file?st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&","bulk_download_file_name":"Modeles_GMM_et_algorithme_de_brandt_pour.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/47073268/Modles_GMM_et_algorithme_de_brandt_pour_20160707-9766-duztni-libre.pdf?1467878532=\u0026response-content-disposition=attachment%3B+filename%3DModeles_GMM_et_algorithme_de_brandt_pour.pdf\u0026Expires=1732441119\u0026Signature=ge4zZuFVhsAk1Yoyc49LIJFwShEBeDk24yXO-4uRed-aCD4M0qDmw5ZKY5YZrYJgL7183R9TLQX6~2rFWhhn2d8jNCWdyrluX7LkkoVJlyAeyw1r4deg2BKLh6TcWiKMVvUiCCzJgyKMyFbTEPrEiU8KPFL9y8IiXv-dw4mZhJBxZqTvlWZCocGLgmrh2ODyiWBUyC2nru2O14gynYHwBe9b8z2MO8dzjRxmgQq7INnTNoGyPcaWj7xULtPsX6Th2wN2IFA5bN3gUiYbIWyvz3FDHkdY-jzCh915DEets9ly~6ws3S1P6CfE67u-PFbWcY7l5jqKciOGBOAk18DhKQ__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"slug":"Modèles_GMM_et_algorithme_de_brandt_pour_la_correction_de_la_segmentation_de_la_parole_par_HMM","transl
ated_slug":"","page_count":4,"language":"fr","content_type":"Work","owner":{"id":35678791,"first_name":"Olivier","middle_initials":null,"last_name":"Rosec","page_name":"OlivierRosec","domain_name":"independent","created_at":"2015-10-05T05:47:18.949-07:00","display_name":"Olivier Rosec","url":"https://independent.academia.edu/OlivierRosec"},"attachments":[{"id":47073268,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/47073268/thumbnails/1.jpg","file_name":"Modles_GMM_et_algorithme_de_brandt_pour_20160707-9766-duztni.pdf","download_url":"https://www.academia.edu/attachments/47073268/download_file?st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&","bulk_download_file_name":"Modeles_GMM_et_algorithme_de_brandt_pour.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/47073268/Modles_GMM_et_algorithme_de_brandt_pour_20160707-9766-duztni-libre.pdf?1467878532=\u0026response-content-disposition=attachment%3B+filename%3DModeles_GMM_et_algorithme_de_brandt_pour.pdf\u0026Expires=1732441119\u0026Signature=ge4zZuFVhsAk1Yoyc49LIJFwShEBeDk24yXO-4uRed-aCD4M0qDmw5ZKY5YZrYJgL7183R9TLQX6~2rFWhhn2d8jNCWdyrluX7LkkoVJlyAeyw1r4deg2BKLh6TcWiKMVvUiCCzJgyKMyFbTEPrEiU8KPFL9y8IiXv-dw4mZhJBxZqTvlWZCocGLgmrh2ODyiWBUyC2nru2O14gynYHwBe9b8z2MO8dzjRxmgQq7INnTNoGyPcaWj7xULtPsX6Th2wN2IFA5bN3gUiYbIWyvz3FDHkdY-jzCh915DEets9ly~6ws3S1P6CfE67u-PFbWcY7l5jqKciOGBOAk18DhKQ__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"research_interests":[],"urls":[]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") }); </script> <div class="js-work-strip profile--work_container" data-work-id="26799218"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/26799218/Robust_Full_band_Adaptive_Sinusoidal_Analysis_of_Speech"><img alt="Research paper thumbnail of Robust Full-band Adaptive 
Sinusoidal Analysis of Speech" class="work-thumbnail" src="https://attachments.academia-assets.com/47073267/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/26799218/Robust_Full_band_Adaptive_Sinusoidal_Analysis_of_Speech">Robust Full-band Adaptive Sinusoidal Analysis of Speech</a></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="d2b92f587a096b0543425350ec0c2a8d" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":47073267,"asset_id":26799218,"asset_type":"Work","button_location":"profile"}" href="https://www.academia.edu/attachments/47073267/download_file?st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="26799218"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span><span id="work-strip-rankings-button-container"></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="26799218"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 26799218; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=26799218]").text(description); $(".js-view-count[data-work-id=26799218]").attr('title', description).tooltip(); }); 
});</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 26799218; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='26799218']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span><span><script>$(function() { new Works.PaperRankView({ workId: 26799218, container: "", }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-f77ea15d77ce96025a6048a514272ad8becbad23c641fc2b3bd6e24ca6ff1932.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (true){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "d2b92f587a096b0543425350ec0c2a8d" } } $('.js-work-strip[data-work-id=26799218]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":26799218,"title":"Robust Full-band Adaptive Sinusoidal Analysis of Speech","translated_title":"","metadata":{"grobid_abstract":"Recent advances in speech analysis have shown that voiced speech can be very well represented using quasi-harmonic frequency tracks and local parameter adaptivity to the underlying signal. 
In this paper, we revisit the quasi-harmonicity approach through the extended adaptive Quasi-Harmonic Model-eaQHM, and we show that the application of a continuous f0 estimation method plus an adaptivity scheme can yield high resolution quasi-harmonic analysis and perceptually indistinguishable resynthesized speech. This method assumes an initial harmonic model which successively converges to quasi-harmonicity. Formal listening tests showed that eaQHM is robust against f0 estimation artefacts and can provide a higher quality in resynthesizing speech, compared to a recently developed model, called the adaptive Harmonic Model (aHM), and the standard Sinusoidal Model (SM).","publication_date":{"day":4,"month":5,"year":2014,"errors":{}},"grobid_abstract_attachment_id":47073267},"translated_abstract":null,"internal_url":"https://www.academia.edu/26799218/Robust_Full_band_Adaptive_Sinusoidal_Analysis_of_Speech","translated_internal_url":"","created_at":"2016-07-07T00:54:22.011-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":35678791,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[],"downloadable_attachments":[{"id":47073267,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/47073267/thumbnails/1.jpg","file_name":"Robust_Full-band_Adaptive_Sinusoidal_Ana20160707-26160-slt3of.pdf","download_url":"https://www.academia.edu/attachments/47073267/download_file?st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&","bulk_download_file_name":"Robust_Full_band_Adaptive_Sinusoidal_Ana.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/47073267/Robust_Full-band_Adaptive_Sinusoidal_Ana20160707-26160-slt3of-libre.pdf?1467878543=\u0026response-content-disposition=attachment%3B+filename%3DRobust_Full_band_Adaptive_Sinusoidal_Ana.pdf\u0026Expires=1732441119\u0026Signature=FeHIENqDipGDcLOM9qd~u6rQF9qF9nYqNG1IJn5F3L8rQPzmNG6DjmSh0fPd315fdQ5uLHD1Oge7HNoRQ1yFJoxJ9bJH9~ZOiVlXl~vjTCfiE1zCu8w
PQQHFflGGlE1yW9L8osEYig28SZjjTSiDCBTR6goeDyPZC4NaZ97deIc2sXZnU-r0eOLvSTktnO0i2cmpqCWmjNM~tDIfhA3hWMWlrsq3ePVrp9RGcObNxYi3pUu-NRhVokoerslYqHhu0a3tSsMKERCsfhnsjPJSCdiXanXeqKZVHy81LHF1S7~84bMCJHklzUlZV6kDk426HVoQVzyucYXw0tk9U5cz0A__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"slug":"Robust_Full_band_Adaptive_Sinusoidal_Analysis_of_Speech","translated_slug":"","page_count":5,"language":"en","content_type":"Work","owner":{"id":35678791,"first_name":"Olivier","middle_initials":null,"last_name":"Rosec","page_name":"OlivierRosec","domain_name":"independent","created_at":"2015-10-05T05:47:18.949-07:00","display_name":"Olivier Rosec","url":"https://independent.academia.edu/OlivierRosec"},"attachments":[{"id":47073267,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/47073267/thumbnails/1.jpg","file_name":"Robust_Full-band_Adaptive_Sinusoidal_Ana20160707-26160-slt3of.pdf","download_url":"https://www.academia.edu/attachments/47073267/download_file?st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&","bulk_download_file_name":"Robust_Full_band_Adaptive_Sinusoidal_Ana.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/47073267/Robust_Full-band_Adaptive_Sinusoidal_Ana20160707-26160-slt3of-libre.pdf?1467878543=\u0026response-content-disposition=attachment%3B+filename%3DRobust_Full_band_Adaptive_Sinusoidal_Ana.pdf\u0026Expires=1732441119\u0026Signature=FeHIENqDipGDcLOM9qd~u6rQF9qF9nYqNG1IJn5F3L8rQPzmNG6DjmSh0fPd315fdQ5uLHD1Oge7HNoRQ1yFJoxJ9bJH9~ZOiVlXl~vjTCfiE1zCu8wPQQHFflGGlE1yW9L8osEYig28SZjjTSiDCBTR6goeDyPZC4NaZ97deIc2sXZnU-r0eOLvSTktnO0i2cmpqCWmjNM~tDIfhA3hWMWlrsq3ePVrp9RGcObNxYi3pUu-NRhVokoerslYqHhu0a3tSsMKERCsfhnsjPJSCdiXanXeqKZVHy81LHF1S7~84bMCJHklzUlZV6kDk426HVoQVzyucYXw0tk9U5cz0A__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"research_interests":[],"urls":[]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") }); </script> <div 
class="js-work-strip profile--work_container" data-work-id="26799217"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" rel="nofollow" href="https://www.academia.edu/26799217/Cooperation_between_global_and_local_methods_for_the_automatic_segmentation_of_speech_synthesis_corpora"><img alt="Research paper thumbnail of Cooperation between global and local methods for the automatic segmentation of speech synthesis corpora" class="work-thumbnail" src="https://a.academia-assets.com/images/blank-paper.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" rel="nofollow" href="https://www.academia.edu/26799217/Cooperation_between_global_and_local_methods_for_the_automatic_segmentation_of_speech_synthesis_corpora">Cooperation between global and local methods for the automatic segmentation of speech synthesis corpora</a></div><div class="wp-workCard_item"><span>Interspeech</span><span>, 2006</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="26799217"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span><span id="work-strip-rankings-button-container"></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="26799217"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 26799217; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = 
window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=26799217]").text(description); $(".js-view-count[data-work-id=26799217]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 26799217; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='26799217']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span><span><script>$(function() { new Works.PaperRankView({ workId: 26799217, container: "", }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-f77ea15d77ce96025a6048a514272ad8becbad23c641fc2b3bd6e24ca6ff1932.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (false){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "-1" } } $('.js-work-strip[data-work-id=26799217]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":26799217,"title":"Cooperation between global and local methods for the automatic segmentation of speech synthesis 
corpora","translated_title":"","metadata":{"publication_date":{"day":null,"month":null,"year":2006,"errors":{}},"publication_name":"Interspeech"},"translated_abstract":null,"internal_url":"https://www.academia.edu/26799217/Cooperation_between_global_and_local_methods_for_the_automatic_segmentation_of_speech_synthesis_corpora","translated_internal_url":"","created_at":"2016-07-07T00:54:21.803-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":35678791,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[],"downloadable_attachments":[],"slug":"Cooperation_between_global_and_local_methods_for_the_automatic_segmentation_of_speech_synthesis_corpora","translated_slug":"","page_count":null,"language":"en","content_type":"Work","owner":{"id":35678791,"first_name":"Olivier","middle_initials":null,"last_name":"Rosec","page_name":"OlivierRosec","domain_name":"independent","created_at":"2015-10-05T05:47:18.949-07:00","display_name":"Olivier Rosec","url":"https://independent.academia.edu/OlivierRosec"},"attachments":[],"research_interests":[],"urls":[{"id":7294928,"url":"http://informatik.uni-trier.de/~ley/db/conf/interspeech/interspeech2006.html"}]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") }); </script> <div class="js-work-strip profile--work_container" data-work-id="26799216"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/26799216/A_new_method_for_pitch_prediction_from_spectral_envelope_and_its_application_in_voice_conversion"><img alt="Research paper thumbnail of A new method for pitch prediction from spectral envelope and its application in voice conversion" class="work-thumbnail" src="https://attachments.academia-assets.com/47073265/thumbnails/1.jpg" /></a></div><div class="wp-workCard 
wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/26799216/A_new_method_for_pitch_prediction_from_spectral_envelope_and_its_application_in_voice_conversion">A new method for pitch prediction from spectral envelope and its application in voice conversion</a></div><div class="wp-workCard_item"><span>Eurospeech</span><span>, 2003</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="8f2e0063d19df1080f1a5134568e165b" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":47073265,"asset_id":26799216,"asset_type":"Work","button_location":"profile"}" href="https://www.academia.edu/attachments/47073265/download_file?st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="26799216"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span><span id="work-strip-rankings-button-container"></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="26799216"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 26799216; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=26799216]").text(description); $(".js-view-count[data-work-id=26799216]").attr('title', description).tooltip(); }); 
});</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 26799216; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='26799216']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span><span><script>$(function() { new Works.PaperRankView({ workId: 26799216, container: "", }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-f77ea15d77ce96025a6048a514272ad8becbad23c641fc2b3bd6e24ca6ff1932.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (true){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "8f2e0063d19df1080f1a5134568e165b" } } $('.js-work-strip[data-work-id=26799216]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":26799216,"title":"A new method for pitch prediction from spectral envelope and its application in voice conversion","translated_title":"","metadata":{"grobid_abstract":"support vector machine, SVM, speaker identification, speaker verification, KL divergence, Kullback-Leibler divergence, probabilistic distance kernels, multimedia One major SVM weakness has been the use of generic kernel functions to compute distances among data points. Polynomial, linear, and Gaussian are typical examples. 
They do not take full advantage of the inherent probability distributions of the data. Focusing on audio speaker identification and verification, we propose to explore the use of novel kernel functions that take full advantage of good probabilistic and descriptive models of audio data. We explore the use of generative speaker identification models such as Gaussian Mixture Models and derive a kernel distance based on the Kullback-Leibler (KL) divergence between generative models. In effect our approach combines the best of both generative and discriminative methods. Our results show that these new kernels perform as well as baseline GMM classifiers and outperform generic kernel based SVM's in both speaker identification and verification on two different audio databases.","publication_date":{"day":null,"month":null,"year":2003,"errors":{}},"publication_name":"Eurospeech","grobid_abstract_attachment_id":47073265},"translated_abstract":null,"internal_url":"https://www.academia.edu/26799216/A_new_method_for_pitch_prediction_from_spectral_envelope_and_its_application_in_voice_conversion","translated_internal_url":"","created_at":"2016-07-07T00:54:21.585-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":35678791,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[],"downloadable_attachments":[{"id":47073265,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/47073265/thumbnails/1.jpg","file_name":"A_new_SVM_approach_to_speaker_identifica20160707-11064-1p1fqr3.pdf","download_url":"https://www.academia.edu/attachments/47073265/download_file?st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&","bulk_download_file_name":"A_new_method_for_pitch_prediction_from_s.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/47073265/A_new_SVM_approach_to_speaker_identifica20160707-11064-1p1fqr3-libre.pdf?1467878533=\u0026response-content-disposition=attachment%3B+filename%3DA_new_method_for_
pitch_prediction_from_s.pdf\u0026Expires=1732441119\u0026Signature=XuZAtaG751Sh2fWRXb5XDCc7ZZ9Sx5xil3w3wEmaeu~WNYwcAEnL5Zs851Z--eRA24hHpWnp7Wm9dIYUtjJNYmW218sdUPVTWTdQCVDNXG5d0UHU~IUBaObsOOi3HnpR~etK0G5XEyCppV0Vil3Cgsb8DM2PHiHuVtR71AlaONYTN-Gz3GtSpOR78SPIT5FGtQ~YVubHsikqUiQgJioft8Ns4qkD0gAT2SuA7rTq3zAVdW5RF6nLBU88SqJeayooljb45VKXNZ9KW4kntRmoSAbvied-hNoNssZifoQ~7Enoxmhvclo5B83pTVgQPqWCzGoL~QNgn8x2GvNSb0zX~Q__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"slug":"A_new_method_for_pitch_prediction_from_spectral_envelope_and_its_application_in_voice_conversion","translated_slug":"","page_count":10,"language":"en","content_type":"Work","owner":{"id":35678791,"first_name":"Olivier","middle_initials":null,"last_name":"Rosec","page_name":"OlivierRosec","domain_name":"independent","created_at":"2015-10-05T05:47:18.949-07:00","display_name":"Olivier Rosec","url":"https://independent.academia.edu/OlivierRosec"},"attachments":[{"id":47073265,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/47073265/thumbnails/1.jpg","file_name":"A_new_SVM_approach_to_speaker_identifica20160707-11064-1p1fqr3.pdf","download_url":"https://www.academia.edu/attachments/47073265/download_file?st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&","bulk_download_file_name":"A_new_method_for_pitch_prediction_from_s.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/47073265/A_new_SVM_approach_to_speaker_identifica20160707-11064-1p1fqr3-libre.pdf?1467878533=\u0026response-content-disposition=attachment%3B+filename%3DA_new_method_for_pitch_prediction_from_s.pdf\u0026Expires=1732441119\u0026Signature=XuZAtaG751Sh2fWRXb5XDCc7ZZ9Sx5xil3w3wEmaeu~WNYwcAEnL5Zs851Z--eRA24hHpWnp7Wm9dIYUtjJNYmW218sdUPVTWTdQCVDNXG5d0UHU~IUBaObsOOi3HnpR~etK0G5XEyCppV0Vil3Cgsb8DM2PHiHuVtR71AlaONYTN-Gz3GtSpOR78SPIT5FGtQ~YVubHsikqUiQgJioft8Ns4qkD0gAT2SuA7rTq3zAVdW5RF6nLBU88SqJeayooljb45VKXNZ9KW4kntRmoSAbvied-hNoNssZifoQ~7Enoxmhvclo5B83pTVgQPqWCzGoL~QNgn8x2GvNSb0zX~Q__\u0026Key-Pair-Id=APKAJLOHF5GGSLR
BV4ZA"}],"research_interests":[],"urls":[{"id":7294927,"url":"http://informatik.uni-trier.de/~ley/db/conf/interspeech/interspeech2003.html"}]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") }); </script> <div class="js-work-strip profile--work_container" data-work-id="26799215"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" rel="nofollow" href="https://www.academia.edu/26799215/Method_and_System_for_the_Quick_Conversion_of_a_Voice_Signal"><img alt="Research paper thumbnail of Method and System for the Quick Conversion of a Voice Signal" class="work-thumbnail" src="https://a.academia-assets.com/images/blank-paper.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" rel="nofollow" href="https://www.academia.edu/26799215/Method_and_System_for_the_Quick_Conversion_of_a_Voice_Signal">Method and System for the Quick Conversion of a Voice Signal</a></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="26799215"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span><span id="work-strip-rankings-button-container"></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="26799215"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 26799215; window.Academia.workViewCountsFetcher.queue(workId, 
function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=26799215]").text(description); $(".js-view-count[data-work-id=26799215]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 26799215; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='26799215']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span><span><script>$(function() { new Works.PaperRankView({ workId: 26799215, container: "", }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-f77ea15d77ce96025a6048a514272ad8becbad23c641fc2b3bd6e24ca6ff1932.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (false){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "-1" } } $('.js-work-strip[data-work-id=26799215]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":26799215,"title":"Method and System for the Quick Conversion of a Voice 
Signal","translated_title":"","metadata":{"publication_date":{"day":13,"month":12,"year":2006,"errors":{}}},"translated_abstract":null,"internal_url":"https://www.academia.edu/26799215/Method_and_System_for_the_Quick_Conversion_of_a_Voice_Signal","translated_internal_url":"","created_at":"2016-07-07T00:54:21.371-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":35678791,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[],"downloadable_attachments":[],"slug":"Method_and_System_for_the_Quick_Conversion_of_a_Voice_Signal","translated_slug":"","page_count":null,"language":"en","content_type":"Work","owner":{"id":35678791,"first_name":"Olivier","middle_initials":null,"last_name":"Rosec","page_name":"OlivierRosec","domain_name":"independent","created_at":"2015-10-05T05:47:18.949-07:00","display_name":"Olivier Rosec","url":"https://independent.academia.edu/OlivierRosec"},"attachments":[],"research_interests":[{"id":28235,"name":"Multidisciplinary","url":"https://www.academia.edu/Documents/in/Multidisciplinary"}],"urls":[{"id":7294926,"url":"http://www.freepatentsonline.com/EP1730728.html"}]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") }); </script> <div class="js-work-strip profile--work_container" data-work-id="26799214"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" rel="nofollow" href="https://www.academia.edu/26799214/On_the_properties_of_a_time_varying_quasi_harmonic_model_of_speech"><img alt="Research paper thumbnail of On the properties of a time-varying quasi-harmonic model of speech" class="work-thumbnail" src="https://a.academia-assets.com/images/blank-paper.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link 
text-gray-darker" data-click-track="profile-work-strip-title" rel="nofollow" href="https://www.academia.edu/26799214/On_the_properties_of_a_time_varying_quasi_harmonic_model_of_speech">On the properties of a time-varying quasi-harmonic model of speech</a></div><div class="wp-workCard_item"><span>Interspeech</span><span>, 2008</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="26799214"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span><span id="work-strip-rankings-button-container"></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="26799214"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 26799214; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=26799214]").text(description); $(".js-view-count[data-work-id=26799214]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 26799214; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='26799214']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span><span><script>$(function() { new Works.PaperRankView({ 
workId: 26799214, container: "", }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-f77ea15d77ce96025a6048a514272ad8becbad23c641fc2b3bd6e24ca6ff1932.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (false){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "-1" } } $('.js-work-strip[data-work-id=26799214]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":26799214,"title":"On the properties of a time-varying quasi-harmonic model of speech","translated_title":"","metadata":{"publication_date":{"day":null,"month":null,"year":2008,"errors":{}},"publication_name":"Interspeech"},"translated_abstract":null,"internal_url":"https://www.academia.edu/26799214/On_the_properties_of_a_time_varying_quasi_harmonic_model_of_speech","translated_internal_url":"","created_at":"2016-07-07T00:54:21.160-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":35678791,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[],"downloadable_attachments":[],"slug":"On_the_properties_of_a_time_varying_quasi_harmonic_model_of_speech","translated_slug":"","page_count":null,"language":"en","content_type":"Work","owner":{"id":35678791,"first_name":"Olivier","middle_initials":null,"last_name":"Rosec","page_name":"OlivierRosec","domain_name":"independent","created_at":"2015-10-05T05:47:18.949-07:00","display_name":"Olivier 
Rosec","url":"https://independent.academia.edu/OlivierRosec"},"attachments":[],"research_interests":[],"urls":[{"id":7294925,"url":"http://informatik.uni-trier.de/~ley/db/conf/interspeech/interspeech2008.html"}]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") }); </script> <div class="js-work-strip profile--work_container" data-work-id="26799213"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" rel="nofollow" href="https://www.academia.edu/26799213/Voice_signal_conversation_method_and_system"><img alt="Research paper thumbnail of Voice signal conversation method and system" class="work-thumbnail" src="https://a.academia-assets.com/images/blank-paper.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" rel="nofollow" href="https://www.academia.edu/26799213/Voice_signal_conversation_method_and_system">Voice signal conversation method and system</a></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="26799213"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span><span id="work-strip-rankings-button-container"></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="26799213"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 26799213; window.Academia.workViewCountsFetcher.queue(workId, 
function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=26799213]").text(description); $(".js-view-count[data-work-id=26799213]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 26799213; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='26799213']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span><span><script>$(function() { new Works.PaperRankView({ workId: 26799213, container: "", }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-f77ea15d77ce96025a6048a514272ad8becbad23c641fc2b3bd6e24ca6ff1932.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (false){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "-1" } } $('.js-work-strip[data-work-id=26799213]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":26799213,"title":"Voice signal conversation method and 
system","translated_title":"","metadata":{"publication_date":{"day":27,"month":7,"year":2010,"errors":{}}},"translated_abstract":null,"internal_url":"https://www.academia.edu/26799213/Voice_signal_conversation_method_and_system","translated_internal_url":"","created_at":"2016-07-07T00:54:20.942-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":35678791,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[],"downloadable_attachments":[],"slug":"Voice_signal_conversation_method_and_system","translated_slug":"","page_count":null,"language":"en","content_type":"Work","owner":{"id":35678791,"first_name":"Olivier","middle_initials":null,"last_name":"Rosec","page_name":"OlivierRosec","domain_name":"independent","created_at":"2015-10-05T05:47:18.949-07:00","display_name":"Olivier Rosec","url":"https://independent.academia.edu/OlivierRosec"},"attachments":[],"research_interests":[],"urls":[{"id":7294924,"url":"http://www.freepatentsonline.com/7765101.html"}]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") }); </script> <div class="js-work-strip profile--work_container" data-work-id="26799212"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" rel="nofollow" href="https://www.academia.edu/26799212/Alleviating_the_one_to_many_mapping_problem_in_voice_conversion_with_context_dependent_modeling"><img alt="Research paper thumbnail of Alleviating the one-to-many mapping problem in voice conversion with context-dependent modeling" class="work-thumbnail" src="https://a.academia-assets.com/images/blank-paper.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" rel="nofollow" 
href="https://www.academia.edu/26799212/Alleviating_the_one_to_many_mapping_problem_in_voice_conversion_with_context_dependent_modeling">Alleviating the one-to-many mapping problem in voice conversion with context-dependent modeling</a></div><div class="wp-workCard_item"><span>Interspeech</span><span>, 2009</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="26799212"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span><span id="work-strip-rankings-button-container"></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="26799212"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 26799212; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=26799212]").text(description); $(".js-view-count[data-work-id=26799212]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 26799212; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='26799212']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span><span><script>$(function() { new Works.PaperRankView({ workId: 26799212, 
container: "", }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-f77ea15d77ce96025a6048a514272ad8becbad23c641fc2b3bd6e24ca6ff1932.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (false){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "-1" } } $('.js-work-strip[data-work-id=26799212]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":26799212,"title":"Alleviating the one-to-many mapping problem in voice conversion with context-dependent modeling","translated_title":"","metadata":{"publication_date":{"day":null,"month":null,"year":2009,"errors":{}},"publication_name":"Interspeech"},"translated_abstract":null,"internal_url":"https://www.academia.edu/26799212/Alleviating_the_one_to_many_mapping_problem_in_voice_conversion_with_context_dependent_modeling","translated_internal_url":"","created_at":"2016-07-07T00:54:20.737-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":35678791,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[],"downloadable_attachments":[],"slug":"Alleviating_the_one_to_many_mapping_problem_in_voice_conversion_with_context_dependent_modeling","translated_slug":"","page_count":null,"language":"en","content_type":"Work","owner":{"id":35678791,"first_name":"Olivier","middle_initials":null,"last_name":"Rosec","page_name":"OlivierRosec","domain_name":"independent","created_at":"2015-10-05T05:47:18.949-07:00","display_name":"Olivier 
Rosec","url":"https://independent.academia.edu/OlivierRosec"},"attachments":[],"research_interests":[],"urls":[{"id":7294923,"url":"http://informatik.uni-trier.de/~ley/db/conf/interspeech/interspeech2009.html"}]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") }); </script> <div class="js-work-strip profile--work_container" data-work-id="26799211"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/26799211/Brandts_GLR_method_and_refined_HMM_segmentation_for_TTS_synthesis_application"><img alt="Research paper thumbnail of Brandt's GLR method & refined HMM segmentation for TTS synthesis application" class="work-thumbnail" src="https://attachments.academia-assets.com/47073259/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/26799211/Brandts_GLR_method_and_refined_HMM_segmentation_for_TTS_synthesis_application">Brandt's GLR method & refined HMM segmentation for TTS synthesis application</a></div><div class="wp-workCard_item"><span>2005 13th European Signal Processing Conference</span><span>, Sep 1, 2005</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="5cdea5c090e86e65575f39b6925c6899" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":47073259,"asset_id":26799211,"asset_type":"Work","button_location":"profile"}" href="https://www.academia.edu/attachments/47073259/download_file?st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span 
class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="26799211"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span><span id="work-strip-rankings-button-container"></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="26799211"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 26799211; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=26799211]").text(description); $(".js-view-count[data-work-id=26799211]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 26799211; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='26799211']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span><span><script>$(function() { new Works.PaperRankView({ workId: 26799211, container: "", }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-f77ea15d77ce96025a6048a514272ad8becbad23c641fc2b3bd6e24ca6ff1932.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var 
dispatcherData = {} if (true){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "5cdea5c090e86e65575f39b6925c6899" } } $('.js-work-strip[data-work-id=26799211]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":26799211,"title":"Brandt's GLR method \u0026 refined HMM segmentation for TTS synthesis application","translated_title":"","metadata":{"grobid_abstract":"In comparison with standard HMM (Hidden Markov Model) with forced alignment, this paper discusses two automatic segmentation algorithms from different points of view: the probabilities of insertion and omission, and the accuracy. The first algorithm, hereafter named the refined HMM algorithm, aims at refining the segmentation performed by standard HMM via a GMM (Gaussian Mixture Model) of each boundary. The second is the Brandt's GLR (Generalized Likelihood Ratio) method. Its goal is to detect signal discontinuities. 
Provided that the sequence of speech units is known, the experimental results presented in this paper suggest in combining the refined HMM algorithm with Brandt's GLR method and other algorithms adapted to the detection of boundaries between known acoustic classes.","publication_date":{"day":1,"month":9,"year":2005,"errors":{}},"publication_name":"2005 13th European Signal Processing Conference","grobid_abstract_attachment_id":47073259},"translated_abstract":null,"internal_url":"https://www.academia.edu/26799211/Brandts_GLR_method_and_refined_HMM_segmentation_for_TTS_synthesis_application","translated_internal_url":"","created_at":"2016-07-07T00:54:20.525-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":35678791,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[],"downloadable_attachments":[{"id":47073259,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/47073259/thumbnails/1.jpg","file_name":"Brandt_s_GLR_Method__Refined_HMM_Segment20160707-30867-1fltrmc.pdf","download_url":"https://www.academia.edu/attachments/47073259/download_file?st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&","bulk_download_file_name":"Brandts_GLR_method_and_refined_HMM_segme.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/47073259/Brandt_s_GLR_Method__Refined_HMM_Segment20160707-30867-1fltrmc-libre.pdf?1467878532=\u0026response-content-disposition=attachment%3B+filename%3DBrandts_GLR_method_and_refined_HMM_segme.pdf\u0026Expires=1732441119\u0026Signature=Dnh2-sLoMB1LuVu77CM7TVxKJAoq4tixCMPehQw5qzDYUsByPeO8Pmif8FbwxOX08QUxzWZfpsPrEZhZXcFYMsBkWWfK30XGm1XaN3PmsNZbls~J8nbEDDkgO9fF~aVNEaopIPMX3ONUEPlLERU60PHme4rkvo1u5McbaOziVIOnJRieQUmfRtQe3sAPL5jtmosl3bu1FrAM3xnNPqZunbME0imOvyHnv~8QFlMp19cM5d4OPrMYXpmAGaNM5CFfQRXeLxBHgTMlV72z7Bl32jW3aO92gAKJ5AT2jNp6W7DunW2V37U7QUzsUF4-noYqGoW1tRUCclshiAuSY4k3AQ__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"slug":"Brandts_GLR_method_and_refined_HMM_segmen
tation_for_TTS_synthesis_application","translated_slug":"","page_count":4,"language":"en","content_type":"Work","owner":{"id":35678791,"first_name":"Olivier","middle_initials":null,"last_name":"Rosec","page_name":"OlivierRosec","domain_name":"independent","created_at":"2015-10-05T05:47:18.949-07:00","display_name":"Olivier Rosec","url":"https://independent.academia.edu/OlivierRosec"},"attachments":[{"id":47073259,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/47073259/thumbnails/1.jpg","file_name":"Brandt_s_GLR_Method__Refined_HMM_Segment20160707-30867-1fltrmc.pdf","download_url":"https://www.academia.edu/attachments/47073259/download_file?st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&","bulk_download_file_name":"Brandts_GLR_method_and_refined_HMM_segme.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/47073259/Brandt_s_GLR_Method__Refined_HMM_Segment20160707-30867-1fltrmc-libre.pdf?1467878532=\u0026response-content-disposition=attachment%3B+filename%3DBrandts_GLR_method_and_refined_HMM_segme.pdf\u0026Expires=1732441119\u0026Signature=Dnh2-sLoMB1LuVu77CM7TVxKJAoq4tixCMPehQw5qzDYUsByPeO8Pmif8FbwxOX08QUxzWZfpsPrEZhZXcFYMsBkWWfK30XGm1XaN3PmsNZbls~J8nbEDDkgO9fF~aVNEaopIPMX3ONUEPlLERU60PHme4rkvo1u5McbaOziVIOnJRieQUmfRtQe3sAPL5jtmosl3bu1FrAM3xnNPqZunbME0imOvyHnv~8QFlMp19cM5d4OPrMYXpmAGaNM5CFfQRXeLxBHgTMlV72z7Bl32jW3aO92gAKJ5AT2jNp6W7DunW2V37U7QUzsUF4-noYqGoW1tRUCclshiAuSY4k3AQ__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"research_interests":[{"id":499,"name":"Acoustics","url":"https://www.academia.edu/Documents/in/Acoustics"},{"id":42799,"name":"Speech","url":"https://www.academia.edu/Documents/in/Speech"},{"id":68937,"name":"Hidden Markov Models","url":"https://www.academia.edu/Documents/in/Hidden_Markov_Models"},{"id":143539,"name":"hidden Markov 
model","url":"https://www.academia.edu/Documents/in/hidden_Markov_model"},{"id":220049,"name":"Accuracy","url":"https://www.academia.edu/Documents/in/Accuracy"},{"id":327120,"name":"Gaussian Mixture Model","url":"https://www.academia.edu/Documents/in/Gaussian_Mixture_Model"},{"id":383728,"name":"Vectors","url":"https://www.academia.edu/Documents/in/Vectors"},{"id":892890,"name":"Point of View","url":"https://www.academia.edu/Documents/in/Point_of_View"}],"urls":[{"id":7294922,"url":"http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7078195"}]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") }); </script> <div class="js-work-strip profile--work_container" data-work-id="26799210"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" rel="nofollow" href="https://www.academia.edu/26799210/Modification_of_a_voice_signal"><img alt="Research paper thumbnail of Modification of a voice signal" class="work-thumbnail" src="https://a.academia-assets.com/images/blank-paper.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" rel="nofollow" href="https://www.academia.edu/26799210/Modification_of_a_voice_signal">Modification of a voice signal</a></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="26799210"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span><span 
id="work-strip-rankings-button-container"></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="26799210"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 26799210; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=26799210]").text(description); $(".js-view-count[data-work-id=26799210]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 26799210; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='26799210']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span><span><script>$(function() { new Works.PaperRankView({ workId: 26799210, container: "", }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-f77ea15d77ce96025a6048a514272ad8becbad23c641fc2b3bd6e24ca6ff1932.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (false){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "-1" } } $('.js-work-strip[data-work-id=26799210]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: 
{"id":26799210,"title":"Modification of a voice signal","translated_title":"","metadata":{"publication_date":{"day":17,"month":3,"year":2010,"errors":{}}},"translated_abstract":null,"internal_url":"https://www.academia.edu/26799210/Modification_of_a_voice_signal","translated_internal_url":"","created_at":"2016-07-07T00:54:20.309-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":35678791,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[],"downloadable_attachments":[],"slug":"Modification_of_a_voice_signal","translated_slug":"","page_count":null,"language":"en","content_type":"Work","owner":{"id":35678791,"first_name":"Olivier","middle_initials":null,"last_name":"Rosec","page_name":"OlivierRosec","domain_name":"independent","created_at":"2015-10-05T05:47:18.949-07:00","display_name":"Olivier Rosec","url":"https://independent.academia.edu/OlivierRosec"},"attachments":[],"research_interests":[],"urls":[{"id":7294921,"url":"http://www.freepatentsonline.com/EP1944755.html"}]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") }); </script> <div class="js-work-strip profile--work_container" data-work-id="26799209"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" rel="nofollow" href="https://www.academia.edu/26799209/Method_and_Device_for_Selecting_Acoustic_Units_and_a_Voice_Synthesis_Device"><img alt="Research paper thumbnail of Method and Device for Selecting Acoustic Units and a Voice Synthesis Device" class="work-thumbnail" src="https://a.academia-assets.com/images/blank-paper.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" rel="nofollow" 
href="https://www.academia.edu/26799209/Method_and_Device_for_Selecting_Acoustic_Units_and_a_Voice_Synthesis_Device">Method and Device for Selecting Acoustic Units and a Voice Synthesis Device</a></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="26799209"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span><span id="work-strip-rankings-button-container"></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="26799209"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 26799209; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=26799209]").text(description); $(".js-view-count[data-work-id=26799209]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 26799209; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='26799209']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span><span><script>$(function() { new Works.PaperRankView({ workId: 26799209, container: "", }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> 
require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-f77ea15d77ce96025a6048a514272ad8becbad23c641fc2b3bd6e24ca6ff1932.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (false){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "-1" } } $('.js-work-strip[data-work-id=26799209]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":26799209,"title":"Method and Device for Selecting Acoustic Units and a Voice Synthesis Device","translated_title":"","metadata":{"publication_date":{"day":20,"month":1,"year":2010,"errors":{}}},"translated_abstract":null,"internal_url":"https://www.academia.edu/26799209/Method_and_Device_for_Selecting_Acoustic_Units_and_a_Voice_Synthesis_Device","translated_internal_url":"","created_at":"2016-07-07T00:54:20.076-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":35678791,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[],"downloadable_attachments":[],"slug":"Method_and_Device_for_Selecting_Acoustic_Units_and_a_Voice_Synthesis_Device","translated_slug":"","page_count":null,"language":"en","content_type":"Work","owner":{"id":35678791,"first_name":"Olivier","middle_initials":null,"last_name":"Rosec","page_name":"OlivierRosec","domain_name":"independent","created_at":"2015-10-05T05:47:18.949-07:00","display_name":"Olivier Rosec","url":"https://independent.academia.edu/OlivierRosec"},"attachments":[],"research_interests":[],"urls":[{"id":7294920,"url":"http://www.freepatentsonline.com/EP1789953.html"}]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", 
"profile_work_strip") }); </script> </div><div class="profile--tab_content_container js-tab-pane tab-pane" data-section-id="3681207" id="papers"><div class="js-work-strip profile--work_container" data-work-id="26799237"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" rel="nofollow" href="https://www.academia.edu/26799237/Improved_Voice_Signal_Conversion_Method_and_System"><img alt="Research paper thumbnail of Improved Voice Signal Conversion Method and System" class="work-thumbnail" src="https://a.academia-assets.com/images/blank-paper.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" rel="nofollow" href="https://www.academia.edu/26799237/Improved_Voice_Signal_Conversion_Method_and_System">Improved Voice Signal Conversion Method and System</a></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="26799237"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span><span id="work-strip-rankings-button-container"></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="26799237"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 26799237; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=26799237]").text(description); 
$(".js-view-count[data-work-id=26799237]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 26799237; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='26799237']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span><span><script>$(function() { new Works.PaperRankView({ workId: 26799237, container: "", }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-f77ea15d77ce96025a6048a514272ad8becbad23c641fc2b3bd6e24ca6ff1932.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (false){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "-1" } } $('.js-work-strip[data-work-id=26799237]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":26799237,"title":"Improved Voice Signal Conversion Method and 
System","translated_title":"","metadata":{"publication_date":{"day":13,"month":12,"year":2006,"errors":{}}},"translated_abstract":null,"internal_url":"https://www.academia.edu/26799237/Improved_Voice_Signal_Conversion_Method_and_System","translated_internal_url":"","created_at":"2016-07-07T00:55:23.453-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":35678791,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[],"downloadable_attachments":[],"slug":"Improved_Voice_Signal_Conversion_Method_and_System","translated_slug":"","page_count":null,"language":"en","content_type":"Work","owner":{"id":35678791,"first_name":"Olivier","middle_initials":null,"last_name":"Rosec","page_name":"OlivierRosec","domain_name":"independent","created_at":"2015-10-05T05:47:18.949-07:00","display_name":"Olivier Rosec","url":"https://independent.academia.edu/OlivierRosec"},"attachments":[],"research_interests":[],"urls":[{"id":7294934,"url":"http://www.freepatentsonline.com/EP1730729.html"}]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") }); </script> <div class="js-work-strip profile--work_container" data-work-id="26799234"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" rel="nofollow" href="https://www.academia.edu/26799234/Method_and_device_for_modifying_an_audio_signal"><img alt="Research paper thumbnail of Method and device for modifying an audio signal" class="work-thumbnail" src="https://a.academia-assets.com/images/blank-paper.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" rel="nofollow" href="https://www.academia.edu/26799234/Method_and_device_for_modifying_an_audio_signal">Method 
and device for modifying an audio signal</a></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="26799234"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span><span id="work-strip-rankings-button-container"></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="26799234"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 26799234; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=26799234]").text(description); $(".js-view-count[data-work-id=26799234]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 26799234; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='26799234']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span><span><script>$(function() { new Works.PaperRankView({ workId: 26799234, container: "", }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 
})(["https://a.academia-assets.com/assets/wow_profile-f77ea15d77ce96025a6048a514272ad8becbad23c641fc2b3bd6e24ca6ff1932.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (false){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "-1" } } $('.js-work-strip[data-work-id=26799234]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":26799234,"title":"Method and device for modifying an audio signal","translated_title":"","metadata":{"publication_date":{"day":21,"month":2,"year":2012,"errors":{}}},"translated_abstract":null,"internal_url":"https://www.academia.edu/26799234/Method_and_device_for_modifying_an_audio_signal","translated_internal_url":"","created_at":"2016-07-07T00:55:15.738-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":35678791,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[],"downloadable_attachments":[],"slug":"Method_and_device_for_modifying_an_audio_signal","translated_slug":"","page_count":null,"language":"en","content_type":"Work","owner":{"id":35678791,"first_name":"Olivier","middle_initials":null,"last_name":"Rosec","page_name":"OlivierRosec","domain_name":"independent","created_at":"2015-10-05T05:47:18.949-07:00","display_name":"Olivier Rosec","url":"https://independent.academia.edu/OlivierRosec"},"attachments":[],"research_interests":[],"urls":[{"id":7294933,"url":"http://www.freepatentsonline.com/8121834.html"}]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") }); </script> <div class="js-work-strip profile--work_container" data-work-id="26479065"><div 
class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" rel="nofollow" href="https://www.academia.edu/26479065/Towards_flexible_speech_coding_for_speech_synthesis_an_LF_modulated_noise_vocoder"><img alt="Research paper thumbnail of Towards flexible speech coding for speech synthesis: an LF+ modulated noise vocoder" class="work-thumbnail" src="https://a.academia-assets.com/images/blank-paper.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" rel="nofollow" href="https://www.academia.edu/26479065/Towards_flexible_speech_coding_for_speech_synthesis_an_LF_modulated_noise_vocoder">Towards flexible speech coding for speech synthesis: an LF+ modulated noise vocoder</a></div><div class="wp-workCard_item wp-workCard--coauthors"><span>by </span><span><a class="" data-click-track="profile-work-strip-authors" href="https://independent.academia.edu/OlivierRosec">Olivier Rosec</a> and <a class="" data-click-track="profile-work-strip-authors" href="https://independent.academia.edu/YannisAgiomyrgiannakis">Yannis Agiomyrgiannakis</a></span></div><div class="wp-workCard_item"><span>Ninth Annual Conference of the …</span><span>, 2008</span></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">... Orange Labs, TECH-SSTP-VMI {yannis.agiomyrgiannakis, olivier.rosec}@orange-ftgroup.com ... t ...</span><a class="js-work-more-abstract" data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">... Orange Labs, TECH-SSTP-VMI {yannis.agiomyrgiannakis, olivier.rosec}@orange-ftgroup.com ... t c open phase ... [5] Y. 
Stylianou, Harmonic-plus-noise Models for speech, combined with statistical methods for speech and speaker modification, Ph.D. dissertation, Ecole Nationale. ...</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="26479065"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span><span id="work-strip-rankings-button-container"></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="26479065"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 26479065; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=26479065]").text(description); $(".js-view-count[data-work-id=26479065]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 26479065; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='26479065']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span><span><script>$(function() { new Works.PaperRankView({ workId: 26479065, container: "", }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 
})(["https://a.academia-assets.com/assets/wow_profile-f77ea15d77ce96025a6048a514272ad8becbad23c641fc2b3bd6e24ca6ff1932.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (false){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "-1" } } $('.js-work-strip[data-work-id=26479065]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":26479065,"title":"Towards flexible speech coding for speech synthesis: an LF+ modulated noise vocoder","translated_title":"","metadata":{"abstract":"... Orange Labs, TECH-SSTP-VMI {yannis.agiomyrgiannakis, olivier.rosec}@orange-ftgroup.com ... t c open phase ... [5] Y. Stylianou, Harmonic-plus-noise Models for speech, combined with statistical methods for speech and speaker modification, Ph.D. dissertation, Ecole Nationale. ...","publisher":"isca-speech.org","publication_date":{"day":null,"month":null,"year":2008,"errors":{}},"publication_name":"Ninth Annual Conference of the …"},"translated_abstract":"... Orange Labs, TECH-SSTP-VMI {yannis.agiomyrgiannakis, olivier.rosec}@orange-ftgroup.com ... t c open phase ... [5] Y. Stylianou, Harmonic-plus-noise Models for speech, combined with statistical methods for speech and speaker modification, Ph.D. dissertation, Ecole Nationale. 
...","internal_url":"https://www.academia.edu/26479065/Towards_flexible_speech_coding_for_speech_synthesis_an_LF_modulated_noise_vocoder","translated_internal_url":"","created_at":"2016-06-24T23:21:36.663-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":50438551,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[{"id":21726055,"work_id":26479065,"tagging_user_id":50438551,"tagged_user_id":35678791,"co_author_invite_id":null,"email":"o***c@voxygen.fr","display_order":0,"name":"Olivier Rosec","title":"Towards flexible speech coding for speech synthesis: an LF+ modulated noise vocoder"}],"downloadable_attachments":[],"slug":"Towards_flexible_speech_coding_for_speech_synthesis_an_LF_modulated_noise_vocoder","translated_slug":"","page_count":null,"language":"en","content_type":"Work","owner":{"id":50438551,"first_name":"Yannis","middle_initials":null,"last_name":"Agiomyrgiannakis","page_name":"YannisAgiomyrgiannakis","domain_name":"independent","created_at":"2016-06-24T23:21:25.598-07:00","display_name":"Yannis Agiomyrgiannakis","url":"https://independent.academia.edu/YannisAgiomyrgiannakis"},"attachments":[],"research_interests":[{"id":2342,"name":"Speech Synthesis","url":"https://www.academia.edu/Documents/in/Speech_Synthesis"},{"id":299796,"name":"Speech Coding","url":"https://www.academia.edu/Documents/in/Speech_Coding"}],"urls":[]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") }); </script> <div class="js-work-strip profile--work_container" data-work-id="26799227"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/26799227/Segmentation_Automatique_De_Corpus_De_Parole_Continue_D%C3%A9di%C3%A9s_%C3%80_La_Synth%C3%A8se_Vocale"><img alt="Research paper thumbnail of Segmentation Automatique De Corpus 
De Parole Continue Dédiés À La Synthèse Vocale" class="work-thumbnail" src="https://attachments.academia-assets.com/47073271/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/26799227/Segmentation_Automatique_De_Corpus_De_Parole_Continue_D%C3%A9di%C3%A9s_%C3%80_La_Synth%C3%A8se_Vocale">Segmentation Automatique De Corpus De Parole Continue Dédiés À La Synthèse Vocale</a></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="a28cbd55e0a2a9c6d801527714f11b0c" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":47073271,"asset_id":26799227,"asset_type":"Work","button_location":"profile"}" href="https://www.academia.edu/attachments/47073271/download_file?st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="26799227"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span><span id="work-strip-rankings-button-container"></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="26799227"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 26799227; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); 
$(".js-view-count[data-work-id=26799227]").text(description); $(".js-view-count[data-work-id=26799227]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 26799227; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='26799227']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span><span><script>$(function() { new Works.PaperRankView({ workId: 26799227, container: "", }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-f77ea15d77ce96025a6048a514272ad8becbad23c641fc2b3bd6e24ca6ff1932.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (true){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "a28cbd55e0a2a9c6d801527714f11b0c" } } $('.js-work-strip[data-work-id=26799227]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":26799227,"title":"Segmentation Automatique De Corpus De Parole Continue Dédiés À La Synthèse 
Vocale","translated_title":"","metadata":{},"translated_abstract":null,"internal_url":"https://www.academia.edu/26799227/Segmentation_Automatique_De_Corpus_De_Parole_Continue_D%C3%A9di%C3%A9s_%C3%80_La_Synth%C3%A8se_Vocale","translated_internal_url":"","created_at":"2016-07-07T00:54:40.488-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":35678791,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[],"downloadable_attachments":[{"id":47073271,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/47073271/thumbnails/1.jpg","file_name":"SEGMENTATION_AUTOMATIQUE_DE_CORPUS_DE_PA20160707-30874-1wwx6tl.pdf","download_url":"https://www.academia.edu/attachments/47073271/download_file?st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&","bulk_download_file_name":"Segmentation_Automatique_De_Corpus_De_Pa.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/47073271/SEGMENTATION_AUTOMATIQUE_DE_CORPUS_DE_PA20160707-30874-1wwx6tl-libre.pdf?1467878533=\u0026response-content-disposition=attachment%3B+filename%3DSegmentation_Automatique_De_Corpus_De_Pa.pdf\u0026Expires=1732441119\u0026Signature=Sel22yTgxF7ChrDf-PnVWkgfocGSx-cg8Uk4b5w7tqG~G3OwETBJqBOMCmA2Un~LylfghN8g9qjRTX9bYc-Vom6qgTEhPYPqOxV1lqGlnvJcn1UwQgjqnc0ccoR656lgUVUPdjzG2fundfePp3FimjLxL9Q1pFtVRAc4mJyhFoOX1H4CXmKymNsgDE5SaKLG4Xz1so2umAljgexCD9DsJX-Lhcbo7-hiKjFEeExQzzm~LEU4x~duCXg77SZ5jSm1Pf3Mvp~u02BzO63l5VMm5d76HyWxVoqRH0tl1FqAViqkx8w3F-6rEKav4sO0ZKq3VcAuKhqmqIiYdCB1alGFoA__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"slug":"Segmentation_Automatique_De_Corpus_De_Parole_Continue_Dédiés_À_La_Synthèse_Vocale","translated_slug":"","page_count":19,"language":"fr","content_type":"Work","owner":{"id":35678791,"first_name":"Olivier","middle_initials":null,"last_name":"Rosec","page_name":"OlivierRosec","domain_name":"independent","created_at":"2015-10-05T05:47:18.949-07:00","display_name":"Olivier 
Rosec","url":"https://independent.academia.edu/OlivierRosec"},"attachments":[{"id":47073271,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/47073271/thumbnails/1.jpg","file_name":"SEGMENTATION_AUTOMATIQUE_DE_CORPUS_DE_PA20160707-30874-1wwx6tl.pdf","download_url":"https://www.academia.edu/attachments/47073271/download_file?st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&","bulk_download_file_name":"Segmentation_Automatique_De_Corpus_De_Pa.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/47073271/SEGMENTATION_AUTOMATIQUE_DE_CORPUS_DE_PA20160707-30874-1wwx6tl-libre.pdf?1467878533=\u0026response-content-disposition=attachment%3B+filename%3DSegmentation_Automatique_De_Corpus_De_Pa.pdf\u0026Expires=1732441119\u0026Signature=Sel22yTgxF7ChrDf-PnVWkgfocGSx-cg8Uk4b5w7tqG~G3OwETBJqBOMCmA2Un~LylfghN8g9qjRTX9bYc-Vom6qgTEhPYPqOxV1lqGlnvJcn1UwQgjqnc0ccoR656lgUVUPdjzG2fundfePp3FimjLxL9Q1pFtVRAc4mJyhFoOX1H4CXmKymNsgDE5SaKLG4Xz1so2umAljgexCD9DsJX-Lhcbo7-hiKjFEeExQzzm~LEU4x~duCXg77SZ5jSm1Pf3Mvp~u02BzO63l5VMm5d76HyWxVoqRH0tl1FqAViqkx8w3F-6rEKav4sO0ZKq3VcAuKhqmqIiYdCB1alGFoA__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"research_interests":[],"urls":[]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") }); </script> <div class="js-work-strip profile--work_container" data-work-id="26799225"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/26799225/Adapting_prosodic_chunking_algorithm_and_synthesis_system_to_specific_style_the_case_of_dictation"><img alt="Research paper thumbnail of Adapting prosodic chunking algorithm and synthesis system to specific style: the case of dictation" class="work-thumbnail" src="https://a.academia-assets.com/images/blank-paper.jpg" /></a></div><div class="wp-workCard 
wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/26799225/Adapting_prosodic_chunking_algorithm_and_synthesis_system_to_specific_style_the_case_of_dictation">Adapting prosodic chunking algorithm and synthesis system to specific style: the case of dictation</a></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">ABSTRACT In this paper, we present an approach that allows a TTS- system to dictate texts to prim...</span><a class="js-work-more-abstract" data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">ABSTRACT In this paper, we present an approach that allows a TTS- system to dictate texts to primary school pupils, while being in conformity with the prosodic features of this speaking style. The approach relies on the elaboration of a preprocessing prosodic module that avoids developing a specific system for a so limited task. The proposal is based on two distinct elements: (i) the results of a preliminary evaluation that allowed getting feedback from potential users; (ii) a corpus study of 10 dictations annotated or uttered by 13 teachers or speech therapists (10 and 3 respectively). The preliminary evaluation focused on three points: the accuracy of the segmentation procedure, the size of the automatically calculated chunks, and the intelligibility of the synthesized voice. It showed that the chunks were judged too long, and the speaking rate too fast. We thus decided to work on these two issues while analyzing the collected data, and confronting the obtained realizations with the outcome of the speech synthesis system and the chunking algorithm. 
The results of the analysis lead to propose a module that provides for this speaking style an enriched text that can be treated by the synthesizer to constrain the unit selection and the prosodic realization.</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="26799225"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span><span id="work-strip-rankings-button-container"></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="26799225"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 26799225; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=26799225]").text(description); $(".js-view-count[data-work-id=26799225]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 26799225; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='26799225']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span><span><script>$(function() { new Works.PaperRankView({ workId: 26799225, container: "", }); });</script></span></div><div 
id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-f77ea15d77ce96025a6048a514272ad8becbad23c641fc2b3bd6e24ca6ff1932.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (false){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "-1" } } $('.js-work-strip[data-work-id=26799225]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":26799225,"title":"Adapting prosodic chunking algorithm and synthesis system to specific style: the case of dictation","translated_title":"","metadata":{"abstract":"ABSTRACT In this paper, we present an approach that allows a TTS- system to dictate texts to primary school pupils, while being in conformity with the prosodic features of this speaking style. The approach relies on the elaboration of a preprocessing prosodic module that avoids developing a specific system for a so limited task. The proposal is based on two distinct elements: (i) the results of a preliminary evaluation that allowed getting feedback from potential users; (ii) a corpus study of 10 dictations annotated or uttered by 13 teachers or speech therapists (10 and 3 respectively). The preliminary evaluation focused on three points: the accuracy of the segmentation procedure, the size of the automatically calculated chunks, and the intelligibility of the synthesized voice. It showed that the chunks were judged too long, and the speaking rate too fast. We thus decided to work on these two issues while analyzing the collected data, and confronting the obtained realizations with the outcome of the speech synthesis system and the chunking algorithm. 
The results of the analysis lead to propose a module that provides for this speaking style an enriched text that can be treated by the synthesizer to constrain the unit selection and the prosodic realization."},"translated_abstract":"ABSTRACT In this paper, we present an approach that allows a TTS- system to dictate texts to primary school pupils, while being in conformity with the prosodic features of this speaking style. The approach relies on the elaboration of a preprocessing prosodic module that avoids developing a specific system for a so limited task. The proposal is based on two distinct elements: (i) the results of a preliminary evaluation that allowed getting feedback from potential users; (ii) a corpus study of 10 dictations annotated or uttered by 13 teachers or speech therapists (10 and 3 respectively). The preliminary evaluation focused on three points: the accuracy of the segmentation procedure, the size of the automatically calculated chunks, and the intelligibility of the synthesized voice. It showed that the chunks were judged too long, and the speaking rate too fast. We thus decided to work on these two issues while analyzing the collected data, and confronting the obtained realizations with the outcome of the speech synthesis system and the chunking algorithm. 
The results of the analysis lead to propose a module that provides for this speaking style an enriched text that can be treated by the synthesizer to constrain the unit selection and the prosodic realization.","internal_url":"https://www.academia.edu/26799225/Adapting_prosodic_chunking_algorithm_and_synthesis_system_to_specific_style_the_case_of_dictation","translated_internal_url":"","created_at":"2016-07-07T00:54:32.108-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":35678791,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[],"downloadable_attachments":[],"slug":"Adapting_prosodic_chunking_algorithm_and_synthesis_system_to_specific_style_the_case_of_dictation","translated_slug":"","page_count":null,"language":"en","content_type":"Work","owner":{"id":35678791,"first_name":"Olivier","middle_initials":null,"last_name":"Rosec","page_name":"OlivierRosec","domain_name":"independent","created_at":"2015-10-05T05:47:18.949-07:00","display_name":"Olivier Rosec","url":"https://independent.academia.edu/OlivierRosec"},"attachments":[],"research_interests":[],"urls":[]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") }); </script> <div class="js-work-strip profile--work_container" data-work-id="26799224"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" rel="nofollow" href="https://www.academia.edu/26799224/Algorithme_de_d%C3%A9coupage_en_groupes_prosodiques_pour_la_dict%C3%A9e_par_lusage_de_synth%C3%A8se_vocale"><img alt="Research paper thumbnail of Algorithme de découpage en groupes prosodiques pour la dictée par l'usage de synthèse vocale" class="work-thumbnail" src="https://a.academia-assets.com/images/blank-paper.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a 
class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" rel="nofollow" href="https://www.academia.edu/26799224/Algorithme_de_d%C3%A9coupage_en_groupes_prosodiques_pour_la_dict%C3%A9e_par_lusage_de_synth%C3%A8se_vocale">Algorithme de découpage en groupes prosodiques pour la dictée par l'usage de synthèse vocale</a></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="26799224"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span><span id="work-strip-rankings-button-container"></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="26799224"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 26799224; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=26799224]").text(description); $(".js-view-count[data-work-id=26799224]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 26799224; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='26799224']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span><span><script>$(function() { new 
Works.PaperRankView({ workId: 26799224, container: "", }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-f77ea15d77ce96025a6048a514272ad8becbad23c641fc2b3bd6e24ca6ff1932.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (false){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "-1" } } $('.js-work-strip[data-work-id=26799224]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":26799224,"title":"Algorithme de découpage en groupes prosodiques pour la dictée par l'usage de synthèse vocale","translated_title":"","metadata":{},"translated_abstract":null,"internal_url":"https://www.academia.edu/26799224/Algorithme_de_d%C3%A9coupage_en_groupes_prosodiques_pour_la_dict%C3%A9e_par_lusage_de_synth%C3%A8se_vocale","translated_internal_url":"","created_at":"2016-07-07T00:54:31.978-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":35678791,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[],"downloadable_attachments":[],"slug":"Algorithme_de_découpage_en_groupes_prosodiques_pour_la_dictée_par_lusage_de_synthèse_vocale","translated_slug":"","page_count":null,"language":"fr","content_type":"Work","owner":{"id":35678791,"first_name":"Olivier","middle_initials":null,"last_name":"Rosec","page_name":"OlivierRosec","domain_name":"independent","created_at":"2015-10-05T05:47:18.949-07:00","display_name":"Olivier Rosec","url":"https://independent.academia.edu/OlivierRosec"},"attachments":[],"research_interests":[],"urls":[]}, dispatcherData: dispatcherData }); 
$(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") }); </script> <div class="js-work-strip profile--work_container" data-work-id="26799223"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" rel="nofollow" href="https://www.academia.edu/26799223/Concatenation_cost_calculation_and_optimisation_for_unit_selection_in_TTS"><img alt="Research paper thumbnail of Concatenation cost calculation and optimisation for unit selection in TTS" class="work-thumbnail" src="https://a.academia-assets.com/images/blank-paper.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" rel="nofollow" href="https://www.academia.edu/26799223/Concatenation_cost_calculation_and_optimisation_for_unit_selection_in_TTS">Concatenation cost calculation and optimisation for unit selection in TTS</a></div><div class="wp-workCard_item"><span>Proceedings of 2002 IEEE Workshop on Speech Synthesis 2002 WSS-02</span><span>, 2002</span></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">... Christophe Blouin *, Olivier Rosec *, Paul C. Bagshaw * C? Christophe d&#x27;Alessandro ** *F...</span><a class="js-work-more-abstract" data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">... Christophe Blouin *, Olivier Rosec *, Paul C. Bagshaw * C? Christophe d&#x27;Alessandro ** *France TClCcom R&amp;D FTRD/DIH, **LIMSI, CNRS, Orsay E-mail: (christophe.blouinlolivier.rosecIpaul. bagshaw)@rd.francetelecom.com, <a href="mailto:cda@limsi.fr" rel="nofollow">cda@limsi.fr</a> ... [Bree&#x27;98] Breen A. 
and Jackson P ...</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="26799223"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span><span id="work-strip-rankings-button-container"></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="26799223"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 26799223; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=26799223]").text(description); $(".js-view-count[data-work-id=26799223]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 26799223; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='26799223']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span><span><script>$(function() { new Works.PaperRankView({ workId: 26799223, container: "", }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 
})(["https://a.academia-assets.com/assets/wow_profile-f77ea15d77ce96025a6048a514272ad8becbad23c641fc2b3bd6e24ca6ff1932.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (false){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "-1" } } $('.js-work-strip[data-work-id=26799223]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":26799223,"title":"Concatenation cost calculation and optimisation for unit selection in TTS","translated_title":"","metadata":{"abstract":"... Christophe Blouin *, Olivier Rosec *, Paul C. Bagshaw * C? Christophe d\u0026#x27;Alessandro ** *France TClCcom R\u0026amp;D FTRD/DIH, **LIMSI, CNRS, Orsay E-mail: (christophe.blouinlolivier.rosecIpaul. bagshaw)@rd.francetelecom.com, cda@limsi.fr ... [Bree\u0026#x27;98] Breen A. and Jackson P ...","publication_date":{"day":null,"month":null,"year":2002,"errors":{}},"publication_name":"Proceedings of 2002 IEEE Workshop on Speech Synthesis 2002 WSS-02"},"translated_abstract":"... Christophe Blouin *, Olivier Rosec *, Paul C. Bagshaw * C? Christophe d\u0026#x27;Alessandro ** *France TClCcom R\u0026amp;D FTRD/DIH, **LIMSI, CNRS, Orsay E-mail: (christophe.blouinlolivier.rosecIpaul. bagshaw)@rd.francetelecom.com, cda@limsi.fr ... [Bree\u0026#x27;98] Breen A. 
and Jackson P ...","internal_url":"https://www.academia.edu/26799223/Concatenation_cost_calculation_and_optimisation_for_unit_selection_in_TTS","translated_internal_url":"","created_at":"2016-07-07T00:54:31.847-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":35678791,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[],"downloadable_attachments":[],"slug":"Concatenation_cost_calculation_and_optimisation_for_unit_selection_in_TTS","translated_slug":"","page_count":null,"language":"fr","content_type":"Work","owner":{"id":35678791,"first_name":"Olivier","middle_initials":null,"last_name":"Rosec","page_name":"OlivierRosec","domain_name":"independent","created_at":"2015-10-05T05:47:18.949-07:00","display_name":"Olivier Rosec","url":"https://independent.academia.edu/OlivierRosec"},"attachments":[],"research_interests":[{"id":2342,"name":"Speech Synthesis","url":"https://www.academia.edu/Documents/in/Speech_Synthesis"},{"id":40276,"name":"Proceedings","url":"https://www.academia.edu/Documents/in/Proceedings"},{"id":199316,"name":"Multiple Linear Regression","url":"https://www.academia.edu/Documents/in/Multiple_Linear_Regression"},{"id":999121,"name":"Unit Selection","url":"https://www.academia.edu/Documents/in/Unit_Selection"},{"id":1766708,"name":"WSS","url":"https://www.academia.edu/Documents/in/WSS"}],"urls":[]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") }); </script> <div class="js-work-strip profile--work_container" data-work-id="26799221"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/26799221/A_voice_conversion_method_based_on_joint_pitch_and_spectral_envelope_transformation"><img alt="Research paper thumbnail of A voice conversion method based on joint pitch and spectral envelope 
transformation" class="work-thumbnail" src="https://attachments.academia-assets.com/47073266/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/26799221/A_voice_conversion_method_based_on_joint_pitch_and_spectral_envelope_transformation">A voice conversion method based on joint pitch and spectral envelope transformation</a></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="14feb752e3c20c9d8dd92afe2f5146a6" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":47073266,"asset_id":26799221,"asset_type":"Work","button_location":"profile"}" href="https://www.academia.edu/attachments/47073266/download_file?st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="26799221"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span><span id="work-strip-rankings-button-container"></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="26799221"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 26799221; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=26799221]").text(description); 
$(".js-view-count[data-work-id=26799221]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 26799221; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='26799221']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span><span><script>$(function() { new Works.PaperRankView({ workId: 26799221, container: "", }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-f77ea15d77ce96025a6048a514272ad8becbad23c641fc2b3bd6e24ca6ff1932.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (true){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "14feb752e3c20c9d8dd92afe2f5146a6" } } $('.js-work-strip[data-work-id=26799221]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":26799221,"title":"A voice conversion method based on joint pitch and spectral envelope transformation","translated_title":"","metadata":{"grobid_abstract":"Most of the research in Voice Conversion (VC) is devoted to spectral transformation while the conversion of prosodic features is essentially obtained through a simple linear transformation of pitch. 
These separate transformations lead to an unsatisfactory speech conversion quality, especially when the speaking styles of the source and target speakers are different. In this paper, we propose a method capable of jointly converting pitch and spectral envelope information. The parameters to be transformed are obtained by combining scaled pitch values with the spectral envelope parameters for the voiced frames and only spectral envelope parameters for the unvoiced ones. These parameters are clustered using a Gaussian Mixture Model (GMM). Then the transformation functions are determined using a conditional expectation estimator. Tests carried out show that, this process leads to a satisfactory pitch transformation. Moreover, it makes the spectral envelope transformation more robust.","publication_date":{"day":null,"month":null,"year":2004,"errors":{}},"grobid_abstract_attachment_id":47073266},"translated_abstract":null,"internal_url":"https://www.academia.edu/26799221/A_voice_conversion_method_based_on_joint_pitch_and_spectral_envelope_transformation","translated_internal_url":"","created_at":"2016-07-07T00:54:23.135-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":35678791,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[],"downloadable_attachments":[{"id":47073266,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/47073266/thumbnails/1.jpg","file_name":"A_voice_conversion_method_based_on_joint20160707-31546-1amwrvh.pdf","download_url":"https://www.academia.edu/attachments/47073266/download_file?st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&","bulk_download_file_name":"A_voice_conversion_method_based_on_joint.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/47073266/A_voice_conversion_method_based_on_joint20160707-31546-1amwrvh-libre.pdf?1467878532=\u0026response-content-disposition=attachment%3B+filename%3DA_voice_conve
rsion_method_based_on_joint.pdf\u0026Expires=1732441119\u0026Signature=f1cZfb4pxr4AkHhykLXQWvrcWNgxXCMSKBal3jHPZfqYUZ1yZ1Fk8~Rppn5wOmv02ICIbGTru4PiAZFzTS7d9xVjTXix9MgQFupnJqHn5jkpCKF-4tvF-hrxu59LXWai71GMIp5HeZltMmjAmj2Ezs~mwDnblXuygOouUvV5OYaseTitqAB6puWX5uvJ8nllO8l0da~Zdb1XWQ6elvn0AiGuxFKr9gr20LcvdtaqQudM5nfuLau11tiA9l3wu0QTzf5E0IhuM4u4tJe306FC7tEkuqIHFuhNViVBtXyJUs-Tqt0kx~ygronftcn8lxV6G2rVohaCvfa3SLD09RMxxw__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"slug":"A_voice_conversion_method_based_on_joint_pitch_and_spectral_envelope_transformation","translated_slug":"","page_count":4,"language":"en","content_type":"Work","owner":{"id":35678791,"first_name":"Olivier","middle_initials":null,"last_name":"Rosec","page_name":"OlivierRosec","domain_name":"independent","created_at":"2015-10-05T05:47:18.949-07:00","display_name":"Olivier Rosec","url":"https://independent.academia.edu/OlivierRosec"},"attachments":[{"id":47073266,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/47073266/thumbnails/1.jpg","file_name":"A_voice_conversion_method_based_on_joint20160707-31546-1amwrvh.pdf","download_url":"https://www.academia.edu/attachments/47073266/download_file?st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&","bulk_download_file_name":"A_voice_conversion_method_based_on_joint.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/47073266/A_voice_conversion_method_based_on_joint20160707-31546-1amwrvh-libre.pdf?1467878532=\u0026response-content-disposition=attachment%3B+filename%3DA_voice_conversion_method_based_on_joint.pdf\u0026Expires=1732441119\u0026Signature=f1cZfb4pxr4AkHhykLXQWvrcWNgxXCMSKBal3jHPZfqYUZ1yZ1Fk8~Rppn5wOmv02ICIbGTru4PiAZFzTS7d9xVjTXix9MgQFupnJqHn5jkpCKF-4tvF-hrxu59LXWai71GMIp5HeZltMmjAmj2Ezs~mwDnblXuygOouUvV5OYaseTitqAB6puWX5uvJ8nllO8l0da~Zdb1XWQ6elvn0AiGuxFKr9gr20LcvdtaqQudM5nfuLau11tiA9l3wu0QTzf5E0IhuM4u4tJe306FC7tEkuqIHFuhNViVBtXyJUs-Tqt0kx~ygronftcn8lxV6G2rVohaCvfa3SLD09RMxxw__\u0026K
ey-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"research_interests":[{"id":64590,"name":"Voice Conversion","url":"https://www.academia.edu/Documents/in/Voice_Conversion"},{"id":327120,"name":"Gaussian Mixture Model","url":"https://www.academia.edu/Documents/in/Gaussian_Mixture_Model"},{"id":521553,"name":"Conditional Expectation","url":"https://www.academia.edu/Documents/in/Conditional_Expectation"},{"id":2277205,"name":"Linear Transformation","url":"https://www.academia.edu/Documents/in/Linear_Transformation"}],"urls":[{"id":7294930,"url":"http://eurecom.fr/~ennajjar/pub/en-najjary-icslp-2004.pdf"}]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") }); </script> <div class="js-work-strip profile--work_container" data-work-id="26799220"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/26799220/On_the_robustness_of_the_Quasi_Harmonic_model_of_speech"><img alt="Research paper thumbnail of On the robustness of the Quasi-Harmonic model of speech" class="work-thumbnail" src="https://a.academia-assets.com/images/blank-paper.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/26799220/On_the_robustness_of_the_Quasi_Harmonic_model_of_speech">On the robustness of the Quasi-Harmonic model of speech</a></div><div class="wp-workCard_item"><span>Acoustics Speech and Signal Processing 1988 Icassp 88 1988 International Conference on</span><span>, Mar 14, 2010</span></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">... Yannis Pantazis 1 , Olivier Rosec 2 and Yannis Stylianou 1 ... 
FORTH, and Multimedia Informat...</span><a class="js-work-more-abstract" data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">... Yannis Pantazis 1 , Olivier Rosec 2 and Yannis Stylianou 1 ... FORTH, and Multimedia Informatics Lab, CSD, UoC, Greece 2 Orange Labs TECH/SSTP/VMI, Lannion, France email: <a href="mailto:pantazis@csd.uoc.gr" rel="nofollow">pantazis@csd.uoc.gr</a>, <a href="mailto:olivier.rosec@orange-ftgroup.com" rel="nofollow">olivier.rosec@orange-ftgroup.com</a> and <a href="mailto:yannis@csd.uoc.gr" rel="nofollow">yannis@csd.uoc.gr</a> ... η/2π (Hz) (c) ...</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="26799220"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span><span id="work-strip-rankings-button-container"></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="26799220"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 26799220; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=26799220]").text(description); $(".js-view-count[data-work-id=26799220]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 
26799220; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='26799220']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span><span><script>$(function() { new Works.PaperRankView({ workId: 26799220, container: "", }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-f77ea15d77ce96025a6048a514272ad8becbad23c641fc2b3bd6e24ca6ff1932.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (false){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "-1" } } $('.js-work-strip[data-work-id=26799220]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":26799220,"title":"On the robustness of the Quasi-Harmonic model of speech","translated_title":"","metadata":{"abstract":"... Yannis Pantazis 1 , Olivier Rosec 2 and Yannis Stylianou 1 ... FORTH, and Multimedia Informatics Lab, CSD, UoC, Greece 2 Orange Labs TECH/SSTP/VMI, Lannion, France email: pantazis@csd.uoc.gr, olivier.rosec@orange-ftgroup.com and yannis@csd.uoc.gr ... η/2π (Hz) (c) ...","publication_date":{"day":14,"month":3,"year":2010,"errors":{}},"publication_name":"Acoustics Speech and Signal Processing 1988 Icassp 88 1988 International Conference on"},"translated_abstract":"... Yannis Pantazis 1 , Olivier Rosec 2 and Yannis Stylianou 1 ... 
FORTH, and Multimedia Informatics Lab, CSD, UoC, Greece 2 Orange Labs TECH/SSTP/VMI, Lannion, France email: pantazis@csd.uoc.gr, olivier.rosec@orange-ftgroup.com and yannis@csd.uoc.gr ... η/2π (Hz) (c) ...","internal_url":"https://www.academia.edu/26799220/On_the_robustness_of_the_Quasi_Harmonic_model_of_speech","translated_internal_url":"","created_at":"2016-07-07T00:54:22.920-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":35678791,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[],"downloadable_attachments":[],"slug":"On_the_robustness_of_the_Quasi_Harmonic_model_of_speech","translated_slug":"","page_count":null,"language":"en","content_type":"Work","owner":{"id":35678791,"first_name":"Olivier","middle_initials":null,"last_name":"Rosec","page_name":"OlivierRosec","domain_name":"independent","created_at":"2015-10-05T05:47:18.949-07:00","display_name":"Olivier Rosec","url":"https://independent.academia.edu/OlivierRosec"},"attachments":[],"research_interests":[{"id":1440,"name":"Visualization","url":"https://www.academia.edu/Documents/in/Visualization"},{"id":2342,"name":"Speech Synthesis","url":"https://www.academia.edu/Documents/in/Speech_Synthesis"},{"id":8056,"name":"Speech Acoustics","url":"https://www.academia.edu/Documents/in/Speech_Acoustics"},{"id":36835,"name":"Speech Processing","url":"https://www.academia.edu/Documents/in/Speech_Processing"},{"id":40738,"name":"Signal Analysis","url":"https://www.academia.edu/Documents/in/Signal_Analysis"},{"id":82241,"name":"Cramer Rao Lower Bound","url":"https://www.academia.edu/Documents/in/Cramer_Rao_Lower_Bound"},{"id":139657,"name":"Frame Analysis","url":"https://www.academia.edu/Documents/in/Frame_Analysis"},{"id":279495,"name":"Robustness","url":"https://www.academia.edu/Documents/in/Robustness"},{"id":299796,"name":"Speech Coding","url":"https://www.academia.edu/Documents/in/Speech_Coding"},{"id":368258,"name":"Speech 
analysis","url":"https://www.academia.edu/Documents/in/Speech_analysis"},{"id":375860,"name":"Frequency Estimation","url":"https://www.academia.edu/Documents/in/Frequency_Estimation"},{"id":582384,"name":"Gaussian noise","url":"https://www.academia.edu/Documents/in/Gaussian_noise"}],"urls":[{"id":7294929,"url":"http://dx.doi.org/10.1109/icassp.2010.5495700"}]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") }); </script> <div class="js-work-strip profile--work_container" data-work-id="26799219"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/26799219/Mod%C3%A8les_GMM_et_algorithme_de_brandt_pour_la_correction_de_la_segmentation_de_la_parole_par_HMM"><img alt="Research paper thumbnail of Modèles GMM et algorithme de brandt pour la correction de la segmentation de la parole par HMM" class="work-thumbnail" src="https://attachments.academia-assets.com/47073268/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/26799219/Mod%C3%A8les_GMM_et_algorithme_de_brandt_pour_la_correction_de_la_segmentation_de_la_parole_par_HMM">Modèles GMM et algorithme de brandt pour la correction de la segmentation de la parole par HMM</a></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="d5694fa71543928f2cd0166fe65c051d" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":47073268,"asset_id":26799219,"asset_type":"Work","button_location":"profile"}" 
href="https://www.academia.edu/attachments/47073268/download_file?st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="26799219"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span><span id="work-strip-rankings-button-container"></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="26799219"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 26799219; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=26799219]").text(description); $(".js-view-count[data-work-id=26799219]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 26799219; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='26799219']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span><span><script>$(function() { new Works.PaperRankView({ workId: 26799219, container: "", }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 
})(["https://a.academia-assets.com/assets/wow_profile-f77ea15d77ce96025a6048a514272ad8becbad23c641fc2b3bd6e24ca6ff1932.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (true){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "d5694fa71543928f2cd0166fe65c051d" } } $('.js-work-strip[data-work-id=26799219]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":26799219,"title":"Modèles GMM et algorithme de brandt pour la correction de la segmentation de la parole par HMM","translated_title":"","metadata":{"grobid_abstract":"On compare les performances de deux algorithmes de segmentation automatique. Le premier, nommé \"HMM amélioré\", affine la segmentation produite par les modèles de Markov cachés (HMM). Le deuxième est l'algorithme de Brandt qui vise, quantà lui,à détecter les ruptures de stationnarité. Le premier algorithme requiert la connaissance a priori de la phonétisation, le second non.Étant donné que l'algorithme de Brandt commet des insertions et des omissions, ce qui n'est pas le cas du HMM amélioré, on introduit une généralisation du taux de segmentation correcte (TSC) afin de comparer ces deux algorithmes. 
Les mesures expérimentales des TSCs permettent d'évaluer une limite supérieure des performances de l'algorithme de Brandt et suggèrent de combiner ces deux méthodes avec d'autres algorithmes adaptésà la séparation des classes acoustico-phonétiques.","publication_date":{"day":null,"month":null,"year":2005,"errors":{}},"grobid_abstract_attachment_id":47073268},"translated_abstract":null,"internal_url":"https://www.academia.edu/26799219/Mod%C3%A8les_GMM_et_algorithme_de_brandt_pour_la_correction_de_la_segmentation_de_la_parole_par_HMM","translated_internal_url":"","created_at":"2016-07-07T00:54:22.765-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":35678791,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[],"downloadable_attachments":[{"id":47073268,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/47073268/thumbnails/1.jpg","file_name":"Modles_GMM_et_algorithme_de_brandt_pour_20160707-9766-duztni.pdf","download_url":"https://www.academia.edu/attachments/47073268/download_file?st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&","bulk_download_file_name":"Modeles_GMM_et_algorithme_de_brandt_pour.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/47073268/Modles_GMM_et_algorithme_de_brandt_pour_20160707-9766-duztni-libre.pdf?1467878532=\u0026response-content-disposition=attachment%3B+filename%3DModeles_GMM_et_algorithme_de_brandt_pour.pdf\u0026Expires=1732441119\u0026Signature=ge4zZuFVhsAk1Yoyc49LIJFwShEBeDk24yXO-4uRed-aCD4M0qDmw5ZKY5YZrYJgL7183R9TLQX6~2rFWhhn2d8jNCWdyrluX7LkkoVJlyAeyw1r4deg2BKLh6TcWiKMVvUiCCzJgyKMyFbTEPrEiU8KPFL9y8IiXv-dw4mZhJBxZqTvlWZCocGLgmrh2ODyiWBUyC2nru2O14gynYHwBe9b8z2MO8dzjRxmgQq7INnTNoGyPcaWj7xULtPsX6Th2wN2IFA5bN3gUiYbIWyvz3FDHkdY-jzCh915DEets9ly~6ws3S1P6CfE67u-PFbWcY7l5jqKciOGBOAk18DhKQ__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"slug":"Modèles_GMM_et_algorithme_de_brandt_pour_la_correction_de_la_segmen
tation_de_la_parole_par_HMM","translated_slug":"","page_count":4,"language":"fr","content_type":"Work","owner":{"id":35678791,"first_name":"Olivier","middle_initials":null,"last_name":"Rosec","page_name":"OlivierRosec","domain_name":"independent","created_at":"2015-10-05T05:47:18.949-07:00","display_name":"Olivier Rosec","url":"https://independent.academia.edu/OlivierRosec"},"attachments":[{"id":47073268,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/47073268/thumbnails/1.jpg","file_name":"Modles_GMM_et_algorithme_de_brandt_pour_20160707-9766-duztni.pdf","download_url":"https://www.academia.edu/attachments/47073268/download_file?st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&","bulk_download_file_name":"Modeles_GMM_et_algorithme_de_brandt_pour.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/47073268/Modles_GMM_et_algorithme_de_brandt_pour_20160707-9766-duztni-libre.pdf?1467878532=\u0026response-content-disposition=attachment%3B+filename%3DModeles_GMM_et_algorithme_de_brandt_pour.pdf\u0026Expires=1732441119\u0026Signature=ge4zZuFVhsAk1Yoyc49LIJFwShEBeDk24yXO-4uRed-aCD4M0qDmw5ZKY5YZrYJgL7183R9TLQX6~2rFWhhn2d8jNCWdyrluX7LkkoVJlyAeyw1r4deg2BKLh6TcWiKMVvUiCCzJgyKMyFbTEPrEiU8KPFL9y8IiXv-dw4mZhJBxZqTvlWZCocGLgmrh2ODyiWBUyC2nru2O14gynYHwBe9b8z2MO8dzjRxmgQq7INnTNoGyPcaWj7xULtPsX6Th2wN2IFA5bN3gUiYbIWyvz3FDHkdY-jzCh915DEets9ly~6ws3S1P6CfE67u-PFbWcY7l5jqKciOGBOAk18DhKQ__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"research_interests":[],"urls":[]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") }); </script> <div class="js-work-strip profile--work_container" data-work-id="26799218"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" 
href="https://www.academia.edu/26799218/Robust_Full_band_Adaptive_Sinusoidal_Analysis_of_Speech"><img alt="Research paper thumbnail of Robust Full-band Adaptive Sinusoidal Analysis of Speech" class="work-thumbnail" src="https://attachments.academia-assets.com/47073267/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/26799218/Robust_Full_band_Adaptive_Sinusoidal_Analysis_of_Speech">Robust Full-band Adaptive Sinusoidal Analysis of Speech</a></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="d2b92f587a096b0543425350ec0c2a8d" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":47073267,"asset_id":26799218,"asset_type":"Work","button_location":"profile"}" href="https://www.academia.edu/attachments/47073267/download_file?st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="26799218"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span><span id="work-strip-rankings-button-container"></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="26799218"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 26799218; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + 
window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=26799218]").text(description); $(".js-view-count[data-work-id=26799218]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 26799218; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='26799218']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span><span><script>$(function() { new Works.PaperRankView({ workId: 26799218, container: "", }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-f77ea15d77ce96025a6048a514272ad8becbad23c641fc2b3bd6e24ca6ff1932.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (true){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "d2b92f587a096b0543425350ec0c2a8d" } } $('.js-work-strip[data-work-id=26799218]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":26799218,"title":"Robust Full-band Adaptive Sinusoidal Analysis of Speech","translated_title":"","metadata":{"grobid_abstract":"Recent advances in speech analysis have shown that voiced speech can be very well represented using quasi-harmonic frequency tracks and local parameter adaptivity to the underlying signal. 
In this paper, we revisit the quasi-harmonicity approach through the extended adaptive Quasi-Harmonic Model-eaQHM, and we show that the application of a continuous f0 estimation method plus an adaptivity scheme can yield high resolution quasi-harmonic analysis and perceptually indistinguishable resynthesized speech. This method assumes an initial harmonic model which successively converges to quasi-harmonicity. Formal listening tests showed that eaQHM is robust against f0 estimation artefacts and can provide a higher quality in resynthesizing speech, compared to a recently developed model, called the adaptive Harmonic Model (aHM), and the standard Sinusoidal Model (SM).","publication_date":{"day":4,"month":5,"year":2014,"errors":{}},"grobid_abstract_attachment_id":47073267},"translated_abstract":null,"internal_url":"https://www.academia.edu/26799218/Robust_Full_band_Adaptive_Sinusoidal_Analysis_of_Speech","translated_internal_url":"","created_at":"2016-07-07T00:54:22.011-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":35678791,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[],"downloadable_attachments":[{"id":47073267,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/47073267/thumbnails/1.jpg","file_name":"Robust_Full-band_Adaptive_Sinusoidal_Ana20160707-26160-slt3of.pdf","download_url":"https://www.academia.edu/attachments/47073267/download_file?st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&","bulk_download_file_name":"Robust_Full_band_Adaptive_Sinusoidal_Ana.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/47073267/Robust_Full-band_Adaptive_Sinusoidal_Ana20160707-26160-slt3of-libre.pdf?1467878543=\u0026response-content-disposition=attachment%3B+filename%3DRobust_Full_band_Adaptive_Sinusoidal_Ana.pdf\u0026Expires=1732441119\u0026Signature=FeHIENqDipGDcLOM9qd~u6rQF9qF9nYqNG1IJn5F3L8rQPzmNG6DjmSh0fPd315fdQ5uLHD1Oge7HNo
RQ1yFJoxJ9bJH9~ZOiVlXl~vjTCfiE1zCu8wPQQHFflGGlE1yW9L8osEYig28SZjjTSiDCBTR6goeDyPZC4NaZ97deIc2sXZnU-r0eOLvSTktnO0i2cmpqCWmjNM~tDIfhA3hWMWlrsq3ePVrp9RGcObNxYi3pUu-NRhVokoerslYqHhu0a3tSsMKERCsfhnsjPJSCdiXanXeqKZVHy81LHF1S7~84bMCJHklzUlZV6kDk426HVoQVzyucYXw0tk9U5cz0A__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"slug":"Robust_Full_band_Adaptive_Sinusoidal_Analysis_of_Speech","translated_slug":"","page_count":5,"language":"en","content_type":"Work","owner":{"id":35678791,"first_name":"Olivier","middle_initials":null,"last_name":"Rosec","page_name":"OlivierRosec","domain_name":"independent","created_at":"2015-10-05T05:47:18.949-07:00","display_name":"Olivier Rosec","url":"https://independent.academia.edu/OlivierRosec"},"attachments":[{"id":47073267,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/47073267/thumbnails/1.jpg","file_name":"Robust_Full-band_Adaptive_Sinusoidal_Ana20160707-26160-slt3of.pdf","download_url":"https://www.academia.edu/attachments/47073267/download_file?st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&","bulk_download_file_name":"Robust_Full_band_Adaptive_Sinusoidal_Ana.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/47073267/Robust_Full-band_Adaptive_Sinusoidal_Ana20160707-26160-slt3of-libre.pdf?1467878543=\u0026response-content-disposition=attachment%3B+filename%3DRobust_Full_band_Adaptive_Sinusoidal_Ana.pdf\u0026Expires=1732441119\u0026Signature=FeHIENqDipGDcLOM9qd~u6rQF9qF9nYqNG1IJn5F3L8rQPzmNG6DjmSh0fPd315fdQ5uLHD1Oge7HNoRQ1yFJoxJ9bJH9~ZOiVlXl~vjTCfiE1zCu8wPQQHFflGGlE1yW9L8osEYig28SZjjTSiDCBTR6goeDyPZC4NaZ97deIc2sXZnU-r0eOLvSTktnO0i2cmpqCWmjNM~tDIfhA3hWMWlrsq3ePVrp9RGcObNxYi3pUu-NRhVokoerslYqHhu0a3tSsMKERCsfhnsjPJSCdiXanXeqKZVHy81LHF1S7~84bMCJHklzUlZV6kDk426HVoQVzyucYXw0tk9U5cz0A__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"research_interests":[],"urls":[]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); 
$a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") }); </script> <div class="js-work-strip profile--work_container" data-work-id="26799217"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" rel="nofollow" href="https://www.academia.edu/26799217/Cooperation_between_global_and_local_methods_for_the_automatic_segmentation_of_speech_synthesis_corpora"><img alt="Research paper thumbnail of Cooperation between global and local methods for the automatic segmentation of speech synthesis corpora" class="work-thumbnail" src="https://a.academia-assets.com/images/blank-paper.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" rel="nofollow" href="https://www.academia.edu/26799217/Cooperation_between_global_and_local_methods_for_the_automatic_segmentation_of_speech_synthesis_corpora">Cooperation between global and local methods for the automatic segmentation of speech synthesis corpora</a></div><div class="wp-workCard_item"><span>Interspeech</span><span>, 2006</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="26799217"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span><span id="work-strip-rankings-button-container"></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="26799217"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 26799217; 
window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=26799217]").text(description); $(".js-view-count[data-work-id=26799217]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 26799217; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='26799217']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span><span><script>$(function() { new Works.PaperRankView({ workId: 26799217, container: "", }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-f77ea15d77ce96025a6048a514272ad8becbad23c641fc2b3bd6e24ca6ff1932.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (false){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "-1" } } $('.js-work-strip[data-work-id=26799217]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":26799217,"title":"Cooperation between global and local methods for the automatic segmentation of speech synthesis 
corpora","translated_title":"","metadata":{"publication_date":{"day":null,"month":null,"year":2006,"errors":{}},"publication_name":"Interspeech"},"translated_abstract":null,"internal_url":"https://www.academia.edu/26799217/Cooperation_between_global_and_local_methods_for_the_automatic_segmentation_of_speech_synthesis_corpora","translated_internal_url":"","created_at":"2016-07-07T00:54:21.803-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":35678791,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[],"downloadable_attachments":[],"slug":"Cooperation_between_global_and_local_methods_for_the_automatic_segmentation_of_speech_synthesis_corpora","translated_slug":"","page_count":null,"language":"en","content_type":"Work","owner":{"id":35678791,"first_name":"Olivier","middle_initials":null,"last_name":"Rosec","page_name":"OlivierRosec","domain_name":"independent","created_at":"2015-10-05T05:47:18.949-07:00","display_name":"Olivier Rosec","url":"https://independent.academia.edu/OlivierRosec"},"attachments":[],"research_interests":[],"urls":[{"id":7294928,"url":"http://informatik.uni-trier.de/~ley/db/conf/interspeech/interspeech2006.html"}]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") }); </script> <div class="js-work-strip profile--work_container" data-work-id="26799216"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/26799216/A_new_method_for_pitch_prediction_from_spectral_envelope_and_its_application_in_voice_conversion"><img alt="Research paper thumbnail of A new method for pitch prediction from spectral envelope and its application in voice conversion" class="work-thumbnail" src="https://attachments.academia-assets.com/47073265/thumbnails/1.jpg" /></a></div><div class="wp-workCard 
wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/26799216/A_new_method_for_pitch_prediction_from_spectral_envelope_and_its_application_in_voice_conversion">A new method for pitch prediction from spectral envelope and its application in voice conversion</a></div><div class="wp-workCard_item"><span>Eurospeech</span><span>, 2003</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="8f2e0063d19df1080f1a5134568e165b" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":47073265,"asset_id":26799216,"asset_type":"Work","button_location":"profile"}" href="https://www.academia.edu/attachments/47073265/download_file?st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="26799216"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span><span id="work-strip-rankings-button-container"></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="26799216"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 26799216; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=26799216]").text(description); $(".js-view-count[data-work-id=26799216]").attr('title', 
description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 26799216; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='26799216']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span><span><script>$(function() { new Works.PaperRankView({ workId: 26799216, container: "", }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-f77ea15d77ce96025a6048a514272ad8becbad23c641fc2b3bd6e24ca6ff1932.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (true){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "8f2e0063d19df1080f1a5134568e165b" } } $('.js-work-strip[data-work-id=26799216]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":26799216,"title":"A new method for pitch prediction from spectral envelope and its application in voice conversion","translated_title":"","metadata":{"grobid_abstract":"support vector machine, SVM, speaker identification, speaker verification, KL divergence, Kullback-Leibler divergence, probabilistic distance kernels, multimedia One major SVM weakness has been the use of generic kernel functions to compute distances among data points. Polynomial, linear, and Gaussian are typical examples. 
They do not take full advantage of the inherent probability distributions of the data. Focusing on audio speaker identification and verification, we propose to explore the use of novel kernel functions that take full advantage of good probabilistic and descriptive models of audio data. We explore the use of generative speaker identification models such as Gaussian Mixture Models and derive a kernel distance based on the Kullback-Leibler (KL) divergence between generative models. In effect our approach combines the best of both generative and discriminative methods. Our results show that these new kernels perform as well as baseline GMM classifiers and outperform generic kernel based SVM's in both speaker identification and verification on two different audio databases.","publication_date":{"day":null,"month":null,"year":2003,"errors":{}},"publication_name":"Eurospeech","grobid_abstract_attachment_id":47073265},"translated_abstract":null,"internal_url":"https://www.academia.edu/26799216/A_new_method_for_pitch_prediction_from_spectral_envelope_and_its_application_in_voice_conversion","translated_internal_url":"","created_at":"2016-07-07T00:54:21.585-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":35678791,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[],"downloadable_attachments":[{"id":47073265,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/47073265/thumbnails/1.jpg","file_name":"A_new_SVM_approach_to_speaker_identifica20160707-11064-1p1fqr3.pdf","download_url":"https://www.academia.edu/attachments/47073265/download_file?st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&","bulk_download_file_name":"A_new_method_for_pitch_prediction_from_s.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/47073265/A_new_SVM_approach_to_speaker_identifica20160707-11064-1p1fqr3-libre.pdf?1467878533=\u0026response-content-disposition=attach
ment%3B+filename%3DA_new_method_for_pitch_prediction_from_s.pdf\u0026Expires=1732441119\u0026Signature=XuZAtaG751Sh2fWRXb5XDCc7ZZ9Sx5xil3w3wEmaeu~WNYwcAEnL5Zs851Z--eRA24hHpWnp7Wm9dIYUtjJNYmW218sdUPVTWTdQCVDNXG5d0UHU~IUBaObsOOi3HnpR~etK0G5XEyCppV0Vil3Cgsb8DM2PHiHuVtR71AlaONYTN-Gz3GtSpOR78SPIT5FGtQ~YVubHsikqUiQgJioft8Ns4qkD0gAT2SuA7rTq3zAVdW5RF6nLBU88SqJeayooljb45VKXNZ9KW4kntRmoSAbvied-hNoNssZifoQ~7Enoxmhvclo5B83pTVgQPqWCzGoL~QNgn8x2GvNSb0zX~Q__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"slug":"A_new_method_for_pitch_prediction_from_spectral_envelope_and_its_application_in_voice_conversion","translated_slug":"","page_count":10,"language":"en","content_type":"Work","owner":{"id":35678791,"first_name":"Olivier","middle_initials":null,"last_name":"Rosec","page_name":"OlivierRosec","domain_name":"independent","created_at":"2015-10-05T05:47:18.949-07:00","display_name":"Olivier Rosec","url":"https://independent.academia.edu/OlivierRosec"},"attachments":[{"id":47073265,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/47073265/thumbnails/1.jpg","file_name":"A_new_SVM_approach_to_speaker_identifica20160707-11064-1p1fqr3.pdf","download_url":"https://www.academia.edu/attachments/47073265/download_file?st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&","bulk_download_file_name":"A_new_method_for_pitch_prediction_from_s.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/47073265/A_new_SVM_approach_to_speaker_identifica20160707-11064-1p1fqr3-libre.pdf?1467878533=\u0026response-content-disposition=attachment%3B+filename%3DA_new_method_for_pitch_prediction_from_s.pdf\u0026Expires=1732441119\u0026Signature=XuZAtaG751Sh2fWRXb5XDCc7ZZ9Sx5xil3w3wEmaeu~WNYwcAEnL5Zs851Z--eRA24hHpWnp7Wm9dIYUtjJNYmW218sdUPVTWTdQCVDNXG5d0UHU~IUBaObsOOi3HnpR~etK0G5XEyCppV0Vil3Cgsb8DM2PHiHuVtR71AlaONYTN-Gz3GtSpOR78SPIT5FGtQ~YVubHsikqUiQgJioft8Ns4qkD0gAT2SuA7rTq3zAVdW5RF6nLBU88SqJeayooljb45VKXNZ9KW4kntRmoSAbvied-hNoNssZifoQ~7Enoxmhvc
lo5B83pTVgQPqWCzGoL~QNgn8x2GvNSb0zX~Q__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"research_interests":[],"urls":[{"id":7294927,"url":"http://informatik.uni-trier.de/~ley/db/conf/interspeech/interspeech2003.html"}]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") }); </script> <div class="js-work-strip profile--work_container" data-work-id="26799215"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" rel="nofollow" href="https://www.academia.edu/26799215/Method_and_System_for_the_Quick_Conversion_of_a_Voice_Signal"><img alt="Research paper thumbnail of Method and System for the Quick Conversion of a Voice Signal" class="work-thumbnail" src="https://a.academia-assets.com/images/blank-paper.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" rel="nofollow" href="https://www.academia.edu/26799215/Method_and_System_for_the_Quick_Conversion_of_a_Voice_Signal">Method and System for the Quick Conversion of a Voice Signal</a></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="26799215"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span><span id="work-strip-rankings-button-container"></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="26799215"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var 
workId = 26799215; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=26799215]").text(description); $(".js-view-count[data-work-id=26799215]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 26799215; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='26799215']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span><span><script>$(function() { new Works.PaperRankView({ workId: 26799215, container: "", }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-f77ea15d77ce96025a6048a514272ad8becbad23c641fc2b3bd6e24ca6ff1932.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (false){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "-1" } } $('.js-work-strip[data-work-id=26799215]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":26799215,"title":"Method and System for the Quick Conversion of a Voice 
Signal","translated_title":"","metadata":{"publication_date":{"day":13,"month":12,"year":2006,"errors":{}}},"translated_abstract":null,"internal_url":"https://www.academia.edu/26799215/Method_and_System_for_the_Quick_Conversion_of_a_Voice_Signal","translated_internal_url":"","created_at":"2016-07-07T00:54:21.371-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":35678791,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[],"downloadable_attachments":[],"slug":"Method_and_System_for_the_Quick_Conversion_of_a_Voice_Signal","translated_slug":"","page_count":null,"language":"en","content_type":"Work","owner":{"id":35678791,"first_name":"Olivier","middle_initials":null,"last_name":"Rosec","page_name":"OlivierRosec","domain_name":"independent","created_at":"2015-10-05T05:47:18.949-07:00","display_name":"Olivier Rosec","url":"https://independent.academia.edu/OlivierRosec"},"attachments":[],"research_interests":[{"id":28235,"name":"Multidisciplinary","url":"https://www.academia.edu/Documents/in/Multidisciplinary"}],"urls":[{"id":7294926,"url":"http://www.freepatentsonline.com/EP1730728.html"}]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") }); </script> <div class="js-work-strip profile--work_container" data-work-id="26799214"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" rel="nofollow" href="https://www.academia.edu/26799214/On_the_properties_of_a_time_varying_quasi_harmonic_model_of_speech"><img alt="Research paper thumbnail of On the properties of a time-varying quasi-harmonic model of speech" class="work-thumbnail" src="https://a.academia-assets.com/images/blank-paper.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link 
text-gray-darker" data-click-track="profile-work-strip-title" rel="nofollow" href="https://www.academia.edu/26799214/On_the_properties_of_a_time_varying_quasi_harmonic_model_of_speech">On the properties of a time-varying quasi-harmonic model of speech</a></div><div class="wp-workCard_item"><span>Interspeech</span><span>, 2008</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="26799214"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span><span id="work-strip-rankings-button-container"></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="26799214"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 26799214; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=26799214]").text(description); $(".js-view-count[data-work-id=26799214]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 26799214; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='26799214']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span><span><script>$(function() { new Works.PaperRankView({ 
workId: 26799214, container: "", }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-f77ea15d77ce96025a6048a514272ad8becbad23c641fc2b3bd6e24ca6ff1932.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (false){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "-1" } } $('.js-work-strip[data-work-id=26799214]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":26799214,"title":"On the properties of a time-varying quasi-harmonic model of speech","translated_title":"","metadata":{"publication_date":{"day":null,"month":null,"year":2008,"errors":{}},"publication_name":"Interspeech"},"translated_abstract":null,"internal_url":"https://www.academia.edu/26799214/On_the_properties_of_a_time_varying_quasi_harmonic_model_of_speech","translated_internal_url":"","created_at":"2016-07-07T00:54:21.160-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":35678791,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[],"downloadable_attachments":[],"slug":"On_the_properties_of_a_time_varying_quasi_harmonic_model_of_speech","translated_slug":"","page_count":null,"language":"en","content_type":"Work","owner":{"id":35678791,"first_name":"Olivier","middle_initials":null,"last_name":"Rosec","page_name":"OlivierRosec","domain_name":"independent","created_at":"2015-10-05T05:47:18.949-07:00","display_name":"Olivier 
Rosec","url":"https://independent.academia.edu/OlivierRosec"},"attachments":[],"research_interests":[],"urls":[{"id":7294925,"url":"http://informatik.uni-trier.de/~ley/db/conf/interspeech/interspeech2008.html"}]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") }); </script> <div class="js-work-strip profile--work_container" data-work-id="26799213"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" rel="nofollow" href="https://www.academia.edu/26799213/Voice_signal_conversation_method_and_system"><img alt="Research paper thumbnail of Voice signal conversation method and system" class="work-thumbnail" src="https://a.academia-assets.com/images/blank-paper.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" rel="nofollow" href="https://www.academia.edu/26799213/Voice_signal_conversation_method_and_system">Voice signal conversation method and system</a></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="26799213"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span><span id="work-strip-rankings-button-container"></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="26799213"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 26799213; window.Academia.workViewCountsFetcher.queue(workId, 
function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=26799213]").text(description); $(".js-view-count[data-work-id=26799213]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 26799213; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='26799213']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span><span><script>$(function() { new Works.PaperRankView({ workId: 26799213, container: "", }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-f77ea15d77ce96025a6048a514272ad8becbad23c641fc2b3bd6e24ca6ff1932.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (false){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "-1" } } $('.js-work-strip[data-work-id=26799213]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":26799213,"title":"Voice signal conversation method and 
system","translated_title":"","metadata":{"publication_date":{"day":27,"month":7,"year":2010,"errors":{}}},"translated_abstract":null,"internal_url":"https://www.academia.edu/26799213/Voice_signal_conversation_method_and_system","translated_internal_url":"","created_at":"2016-07-07T00:54:20.942-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":35678791,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[],"downloadable_attachments":[],"slug":"Voice_signal_conversation_method_and_system","translated_slug":"","page_count":null,"language":"en","content_type":"Work","owner":{"id":35678791,"first_name":"Olivier","middle_initials":null,"last_name":"Rosec","page_name":"OlivierRosec","domain_name":"independent","created_at":"2015-10-05T05:47:18.949-07:00","display_name":"Olivier Rosec","url":"https://independent.academia.edu/OlivierRosec"},"attachments":[],"research_interests":[],"urls":[{"id":7294924,"url":"http://www.freepatentsonline.com/7765101.html"}]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") }); </script> <div class="js-work-strip profile--work_container" data-work-id="26799212"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" rel="nofollow" href="https://www.academia.edu/26799212/Alleviating_the_one_to_many_mapping_problem_in_voice_conversion_with_context_dependent_modeling"><img alt="Research paper thumbnail of Alleviating the one-to-many mapping problem in voice conversion with context-dependent modeling" class="work-thumbnail" src="https://a.academia-assets.com/images/blank-paper.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" rel="nofollow" 
href="https://www.academia.edu/26799212/Alleviating_the_one_to_many_mapping_problem_in_voice_conversion_with_context_dependent_modeling">Alleviating the one-to-many mapping problem in voice conversion with context-dependent modeling</a></div><div class="wp-workCard_item"><span>Interspeech</span><span>, 2009</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="26799212"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span><span id="work-strip-rankings-button-container"></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="26799212"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 26799212; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=26799212]").text(description); $(".js-view-count[data-work-id=26799212]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 26799212; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='26799212']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span><span><script>$(function() { new Works.PaperRankView({ workId: 26799212, 
container: "", }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-f77ea15d77ce96025a6048a514272ad8becbad23c641fc2b3bd6e24ca6ff1932.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (false){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "-1" } } $('.js-work-strip[data-work-id=26799212]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":26799212,"title":"Alleviating the one-to-many mapping problem in voice conversion with context-dependent modeling","translated_title":"","metadata":{"publication_date":{"day":null,"month":null,"year":2009,"errors":{}},"publication_name":"Interspeech"},"translated_abstract":null,"internal_url":"https://www.academia.edu/26799212/Alleviating_the_one_to_many_mapping_problem_in_voice_conversion_with_context_dependent_modeling","translated_internal_url":"","created_at":"2016-07-07T00:54:20.737-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":35678791,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[],"downloadable_attachments":[],"slug":"Alleviating_the_one_to_many_mapping_problem_in_voice_conversion_with_context_dependent_modeling","translated_slug":"","page_count":null,"language":"en","content_type":"Work","owner":{"id":35678791,"first_name":"Olivier","middle_initials":null,"last_name":"Rosec","page_name":"OlivierRosec","domain_name":"independent","created_at":"2015-10-05T05:47:18.949-07:00","display_name":"Olivier 
Rosec","url":"https://independent.academia.edu/OlivierRosec"},"attachments":[],"research_interests":[],"urls":[{"id":7294923,"url":"http://informatik.uni-trier.de/~ley/db/conf/interspeech/interspeech2009.html"}]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") }); </script> <div class="js-work-strip profile--work_container" data-work-id="26799211"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/26799211/Brandts_GLR_method_and_refined_HMM_segmentation_for_TTS_synthesis_application"><img alt="Research paper thumbnail of Brandt's GLR method & refined HMM segmentation for TTS synthesis application" class="work-thumbnail" src="https://attachments.academia-assets.com/47073259/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/26799211/Brandts_GLR_method_and_refined_HMM_segmentation_for_TTS_synthesis_application">Brandt's GLR method & refined HMM segmentation for TTS synthesis application</a></div><div class="wp-workCard_item"><span>2005 13th European Signal Processing Conference</span><span>, Sep 1, 2005</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="5cdea5c090e86e65575f39b6925c6899" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":47073259,"asset_id":26799211,"asset_type":"Work","button_location":"profile"}" href="https://www.academia.edu/attachments/47073259/download_file?st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&s=profile"><span><i class="fa 
fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="26799211"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span><span id="work-strip-rankings-button-container"></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="26799211"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 26799211; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=26799211]").text(description); $(".js-view-count[data-work-id=26799211]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 26799211; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='26799211']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span><span><script>$(function() { new Works.PaperRankView({ workId: 26799211, container: "", }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 
})(["https://a.academia-assets.com/assets/wow_profile-f77ea15d77ce96025a6048a514272ad8becbad23c641fc2b3bd6e24ca6ff1932.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (true){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "5cdea5c090e86e65575f39b6925c6899" } } $('.js-work-strip[data-work-id=26799211]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":26799211,"title":"Brandt's GLR method \u0026 refined HMM segmentation for TTS synthesis application","translated_title":"","metadata":{"grobid_abstract":"In comparison with standard HMM (Hidden Markov Model) with forced alignment, this paper discusses two automatic segmentation algorithms from different points of view: the probabilities of insertion and omission, and the accuracy. The first algorithm, hereafter named the refined HMM algorithm, aims at refining the segmentation performed by standard HMM via a GMM (Gaussian Mixture Model) of each boundary. The second is the Brandt's GLR (Generalized Likelihood Ratio) method. Its goal is to detect signal discontinuities. 
Provided that the sequence of speech units is known, the experimental results presented in this paper suggest in combining the refined HMM algorithm with Brandt's GLR method and other algorithms adapted to the detection of boundaries between known acoustic classes.","publication_date":{"day":1,"month":9,"year":2005,"errors":{}},"publication_name":"2005 13th European Signal Processing Conference","grobid_abstract_attachment_id":47073259},"translated_abstract":null,"internal_url":"https://www.academia.edu/26799211/Brandts_GLR_method_and_refined_HMM_segmentation_for_TTS_synthesis_application","translated_internal_url":"","created_at":"2016-07-07T00:54:20.525-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":35678791,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[],"downloadable_attachments":[{"id":47073259,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/47073259/thumbnails/1.jpg","file_name":"Brandt_s_GLR_Method__Refined_HMM_Segment20160707-30867-1fltrmc.pdf","download_url":"https://www.academia.edu/attachments/47073259/download_file?st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&","bulk_download_file_name":"Brandts_GLR_method_and_refined_HMM_segme.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/47073259/Brandt_s_GLR_Method__Refined_HMM_Segment20160707-30867-1fltrmc-libre.pdf?1467878532=\u0026response-content-disposition=attachment%3B+filename%3DBrandts_GLR_method_and_refined_HMM_segme.pdf\u0026Expires=1732441119\u0026Signature=Dnh2-sLoMB1LuVu77CM7TVxKJAoq4tixCMPehQw5qzDYUsByPeO8Pmif8FbwxOX08QUxzWZfpsPrEZhZXcFYMsBkWWfK30XGm1XaN3PmsNZbls~J8nbEDDkgO9fF~aVNEaopIPMX3ONUEPlLERU60PHme4rkvo1u5McbaOziVIOnJRieQUmfRtQe3sAPL5jtmosl3bu1FrAM3xnNPqZunbME0imOvyHnv~8QFlMp19cM5d4OPrMYXpmAGaNM5CFfQRXeLxBHgTMlV72z7Bl32jW3aO92gAKJ5AT2jNp6W7DunW2V37U7QUzsUF4-noYqGoW1tRUCclshiAuSY4k3AQ__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"slug":"Brand
ts_GLR_method_and_refined_HMM_segmentation_for_TTS_synthesis_application","translated_slug":"","page_count":4,"language":"en","content_type":"Work","owner":{"id":35678791,"first_name":"Olivier","middle_initials":null,"last_name":"Rosec","page_name":"OlivierRosec","domain_name":"independent","created_at":"2015-10-05T05:47:18.949-07:00","display_name":"Olivier Rosec","url":"https://independent.academia.edu/OlivierRosec"},"attachments":[{"id":47073259,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/47073259/thumbnails/1.jpg","file_name":"Brandt_s_GLR_Method__Refined_HMM_Segment20160707-30867-1fltrmc.pdf","download_url":"https://www.academia.edu/attachments/47073259/download_file?st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&st=MTczMjQzNzUxOSw4LjIyMi4yMDguMTQ2&","bulk_download_file_name":"Brandts_GLR_method_and_refined_HMM_segme.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/47073259/Brandt_s_GLR_Method__Refined_HMM_Segment20160707-30867-1fltrmc-libre.pdf?1467878532=\u0026response-content-disposition=attachment%3B+filename%3DBrandts_GLR_method_and_refined_HMM_segme.pdf\u0026Expires=1732441119\u0026Signature=Dnh2-sLoMB1LuVu77CM7TVxKJAoq4tixCMPehQw5qzDYUsByPeO8Pmif8FbwxOX08QUxzWZfpsPrEZhZXcFYMsBkWWfK30XGm1XaN3PmsNZbls~J8nbEDDkgO9fF~aVNEaopIPMX3ONUEPlLERU60PHme4rkvo1u5McbaOziVIOnJRieQUmfRtQe3sAPL5jtmosl3bu1FrAM3xnNPqZunbME0imOvyHnv~8QFlMp19cM5d4OPrMYXpmAGaNM5CFfQRXeLxBHgTMlV72z7Bl32jW3aO92gAKJ5AT2jNp6W7DunW2V37U7QUzsUF4-noYqGoW1tRUCclshiAuSY4k3AQ__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"research_interests":[{"id":499,"name":"Acoustics","url":"https://www.academia.edu/Documents/in/Acoustics"},{"id":42799,"name":"Speech","url":"https://www.academia.edu/Documents/in/Speech"},{"id":68937,"name":"Hidden Markov Models","url":"https://www.academia.edu/Documents/in/Hidden_Markov_Models"},{"id":143539,"name":"hidden Markov 
model","url":"https://www.academia.edu/Documents/in/hidden_Markov_model"},{"id":220049,"name":"Accuracy","url":"https://www.academia.edu/Documents/in/Accuracy"},{"id":327120,"name":"Gaussian Mixture Model","url":"https://www.academia.edu/Documents/in/Gaussian_Mixture_Model"},{"id":383728,"name":"Vectors","url":"https://www.academia.edu/Documents/in/Vectors"},{"id":892890,"name":"Point of View","url":"https://www.academia.edu/Documents/in/Point_of_View"}],"urls":[{"id":7294922,"url":"http://ieeexplore.ieee.org/xpls/abs_all.jsp?arnumber=7078195"}]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") }); </script> <div class="js-work-strip profile--work_container" data-work-id="26799210"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" rel="nofollow" href="https://www.academia.edu/26799210/Modification_of_a_voice_signal"><img alt="Research paper thumbnail of Modification of a voice signal" class="work-thumbnail" src="https://a.academia-assets.com/images/blank-paper.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" rel="nofollow" href="https://www.academia.edu/26799210/Modification_of_a_voice_signal">Modification of a voice signal</a></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="26799210"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span><span 
id="work-strip-rankings-button-container"></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="26799210"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 26799210; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=26799210]").text(description); $(".js-view-count[data-work-id=26799210]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 26799210; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='26799210']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span><span><script>$(function() { new Works.PaperRankView({ workId: 26799210, container: "", }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-f77ea15d77ce96025a6048a514272ad8becbad23c641fc2b3bd6e24ca6ff1932.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (false){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "-1" } } $('.js-work-strip[data-work-id=26799210]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: 
{"id":26799210,"title":"Modification of a voice signal","translated_title":"","metadata":{"publication_date":{"day":17,"month":3,"year":2010,"errors":{}}},"translated_abstract":null,"internal_url":"https://www.academia.edu/26799210/Modification_of_a_voice_signal","translated_internal_url":"","created_at":"2016-07-07T00:54:20.309-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":35678791,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[],"downloadable_attachments":[],"slug":"Modification_of_a_voice_signal","translated_slug":"","page_count":null,"language":"en","content_type":"Work","owner":{"id":35678791,"first_name":"Olivier","middle_initials":null,"last_name":"Rosec","page_name":"OlivierRosec","domain_name":"independent","created_at":"2015-10-05T05:47:18.949-07:00","display_name":"Olivier Rosec","url":"https://independent.academia.edu/OlivierRosec"},"attachments":[],"research_interests":[],"urls":[{"id":7294921,"url":"http://www.freepatentsonline.com/EP1944755.html"}]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") }); </script> <div class="js-work-strip profile--work_container" data-work-id="26799209"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" rel="nofollow" href="https://www.academia.edu/26799209/Method_and_Device_for_Selecting_Acoustic_Units_and_a_Voice_Synthesis_Device"><img alt="Research paper thumbnail of Method and Device for Selecting Acoustic Units and a Voice Synthesis Device" class="work-thumbnail" src="https://a.academia-assets.com/images/blank-paper.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" rel="nofollow" 
href="https://www.academia.edu/26799209/Method_and_Device_for_Selecting_Acoustic_Units_and_a_Voice_Synthesis_Device">Method and Device for Selecting Acoustic Units and a Voice Synthesis Device</a></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="26799209"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span><span id="work-strip-rankings-button-container"></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="26799209"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 26799209; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=26799209]").text(description); $(".js-view-count[data-work-id=26799209]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 26799209; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='26799209']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span><span><script>$(function() { new Works.PaperRankView({ workId: 26799209, container: "", }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> 
require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-f77ea15d77ce96025a6048a514272ad8becbad23c641fc2b3bd6e24ca6ff1932.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (false){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "-1" } } $('.js-work-strip[data-work-id=26799209]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":26799209,"title":"Method and Device for Selecting Acoustic Units and a Voice Synthesis Device","translated_title":"","metadata":{"publication_date":{"day":20,"month":1,"year":2010,"errors":{}}},"translated_abstract":null,"internal_url":"https://www.academia.edu/26799209/Method_and_Device_for_Selecting_Acoustic_Units_and_a_Voice_Synthesis_Device","translated_internal_url":"","created_at":"2016-07-07T00:54:20.076-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":35678791,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[],"downloadable_attachments":[],"slug":"Method_and_Device_for_Selecting_Acoustic_Units_and_a_Voice_Synthesis_Device","translated_slug":"","page_count":null,"language":"en","content_type":"Work","owner":{"id":35678791,"first_name":"Olivier","middle_initials":null,"last_name":"Rosec","page_name":"OlivierRosec","domain_name":"independent","created_at":"2015-10-05T05:47:18.949-07:00","display_name":"Olivier Rosec","url":"https://independent.academia.edu/OlivierRosec"},"attachments":[],"research_interests":[],"urls":[{"id":7294920,"url":"http://www.freepatentsonline.com/EP1789953.html"}]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", 
"profile_work_strip") }); </script> </div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-f77ea15d77ce96025a6048a514272ad8becbad23c641fc2b3bd6e24ca6ff1932.js","https://a.academia-assets.com/assets/google_contacts-0dfb882d836b94dbcb4a2d123d6933fc9533eda5be911641f20b4eb428429600.js"], function() { // from javascript_helper.rb $('.js-google-connect-button').click(function(e) { e.preventDefault(); GoogleContacts.authorize_and_show_contacts(); Aedu.Dismissibles.recordClickthrough("WowProfileImportContactsPrompt"); }); $('.js-update-biography-button').click(function(e) { e.preventDefault(); Aedu.Dismissibles.recordClickthrough("UpdateUserBiographyPrompt"); $.ajax({ url: $r.api_v0_profiles_update_about_path({ subdomain_param: 'api', about: "", }), type: 'PUT', success: function(response) { location.reload(); } }); }); $('.js-work-creator-button').click(function (e) { e.preventDefault(); window.location = $r.upload_funnel_document_path({ source: encodeURIComponent(""), }); }); $('.js-video-upload-button').click(function (e) { e.preventDefault(); window.location = $r.upload_funnel_video_path({ source: encodeURIComponent(""), }); }); $('.js-do-this-later-button').click(function() { $(this).closest('.js-profile-nag-panel').remove(); Aedu.Dismissibles.recordDismissal("WowProfileImportContactsPrompt"); }); $('.js-update-biography-do-this-later-button').click(function(){ $(this).closest('.js-profile-nag-panel').remove(); Aedu.Dismissibles.recordDismissal("UpdateUserBiographyPrompt"); }); $('.wow-profile-mentions-upsell--close').click(function(){ $('.wow-profile-mentions-upsell--panel').hide(); Aedu.Dismissibles.recordDismissal("WowProfileMentionsUpsell"); }); $('.wow-profile-mentions-upsell--button').click(function(){ Aedu.Dismissibles.recordClickthrough("WowProfileMentionsUpsell"); }); new WowProfile.SocialRedesignUserWorks({ initialWorksOffset: 20, allWorksOffset: 20, maxSections: 1 }) }); </script> 
</div></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile_edit-5ea339ee107c863779f560dd7275595239fed73f1a13d279d2b599a28c0ecd33.js","https://a.academia-assets.com/assets/add_coauthor-22174b608f9cb871d03443cafa7feac496fb50d7df2d66a53f5ee3c04ba67f53.js","https://a.academia-assets.com/assets/tab-dcac0130902f0cc2d8cb403714dd47454f11fc6fb0e99ae6a0827b06613abc20.js","https://a.academia-assets.com/assets/wow_profile-f77ea15d77ce96025a6048a514272ad8becbad23c641fc2b3bd6e24ca6ff1932.js"], function() { // from javascript_helper.rb window.ae = window.ae || {}; window.ae.WowProfile = window.ae.WowProfile || {}; if(Aedu.User.current && Aedu.User.current.id === $viewedUser.id) { window.ae.WowProfile.current_user_edit = {}; new WowProfileEdit.EditUploadView({ el: '.js-edit-upload-button-wrapper', model: window.$current_user, }); new AddCoauthor.AddCoauthorsController(); } var userInfoView = new WowProfile.SocialRedesignUserInfo({ recaptcha_key: "6LdxlRMTAAAAADnu_zyLhLg0YF9uACwz78shpjJB" }); WowProfile.router = new WowProfile.Router({ userInfoView: userInfoView }); Backbone.history.start({ pushState: true, root: "/" + $viewedUser.page_name }); new WowProfile.UserWorksNav() }); </script> </div> <div class="bootstrap login"><div class="modal fade login-modal" id="login-modal"><div class="login-modal-dialog modal-dialog"><div class="modal-content"><div class="modal-header"><button class="close close" data-dismiss="modal" type="button"><span aria-hidden="true">×</span><span class="sr-only">Close</span></button><h4 class="modal-title text-center"><strong>Log In</strong></h4></div><div class="modal-body"><div class="row"><div class="col-xs-10 col-xs-offset-1"><button class="btn btn-fb btn-lg btn-block btn-v-center-content" id="login-facebook-oauth-button"><svg style="float: left; width: 19px; line-height: 1em; margin-right: .3em;" aria-hidden="true" focusable="false" data-prefix="fab" data-icon="facebook-square" 
class="svg-inline--fa fa-facebook-square fa-w-14" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512"><path fill="currentColor" d="M400 32H48A48 48 0 0 0 0 80v352a48 48 0 0 0 48 48h137.25V327.69h-63V256h63v-54.64c0-62.15 37-96.48 93.67-96.48 27.14 0 55.52 4.84 55.52 4.84v61h-31.27c-30.81 0-40.42 19.12-40.42 38.73V256h68.78l-11 71.69h-57.78V480H400a48 48 0 0 0 48-48V80a48 48 0 0 0-48-48z"></path></svg><small><strong>Log in</strong> with <strong>Facebook</strong></small></button><br /><button class="btn btn-google btn-lg btn-block btn-v-center-content" id="login-google-oauth-button"><svg style="float: left; width: 22px; line-height: 1em; margin-right: .3em;" aria-hidden="true" focusable="false" data-prefix="fab" data-icon="google-plus" class="svg-inline--fa fa-google-plus fa-w-16" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512"><path fill="currentColor" d="M256,8C119.1,8,8,119.1,8,256S119.1,504,256,504,504,392.9,504,256,392.9,8,256,8ZM185.3,380a124,124,0,0,1,0-248c31.3,0,60.1,11,83,32.3l-33.6,32.6c-13.2-12.9-31.3-19.1-49.4-19.1-42.9,0-77.2,35.5-77.2,78.1S142.3,334,185.3,334c32.6,0,64.9-19.1,70.1-53.3H185.3V238.1H302.2a109.2,109.2,0,0,1,1.9,20.7c0,70.8-47.5,121.2-118.8,121.2ZM415.5,273.8v35.5H380V273.8H344.5V238.3H380V202.8h35.5v35.5h35.2v35.5Z"></path></svg><small><strong>Log in</strong> with <strong>Google</strong></small></button><br /><style type="text/css">.sign-in-with-apple-button { width: 100%; height: 52px; border-radius: 3px; border: 1px solid black; cursor: pointer; }</style><script src="https://appleid.cdn-apple.com/appleauth/static/jsapi/appleid/1/en_US/appleid.auth.js" type="text/javascript"></script><div class="sign-in-with-apple-button" data-border="false" data-color="white" id="appleid-signin"><span ="Sign Up with Apple" class="u-fs11"></span></div><script>AppleID.auth.init({ clientId: 'edu.academia.applesignon', scope: 'name email', redirectURI: 'https://www.academia.edu/sessions', state: 
"e1a5efc7415be19076f8655ba4a790574f2a7c4c33882cf989a6bf88e49648e8", });</script><script>// Hacky way of checking if on fast loswp if (window.loswp == null) { (function() { const Google = window?.Aedu?.Auth?.OauthButton?.Login?.Google; const Facebook = window?.Aedu?.Auth?.OauthButton?.Login?.Facebook; if (Google) { new Google({ el: '#login-google-oauth-button', rememberMeCheckboxId: 'remember_me', track: null }); } if (Facebook) { new Facebook({ el: '#login-facebook-oauth-button', rememberMeCheckboxId: 'remember_me', track: null }); } })(); }</script></div></div></div><div class="modal-body"><div class="row"><div class="col-xs-10 col-xs-offset-1"><div class="hr-heading login-hr-heading"><span class="hr-heading-text">or</span></div></div></div></div><div class="modal-body"><div class="row"><div class="col-xs-10 col-xs-offset-1"><form class="js-login-form" action="https://www.academia.edu/sessions" accept-charset="UTF-8" method="post"><input name="utf8" type="hidden" value="✓" autocomplete="off" /><input type="hidden" name="authenticity_token" value="vym/Ydv5C2lwMqfpDbJwKYVVgUOLgqMFWPF8rU1pKu7Sqq79dmuEYnU4Vib7H4td6TZ5a8Hq0YFVit9120YwRw==" autocomplete="off" /><div class="form-group"><label class="control-label" for="login-modal-email-input" style="font-size: 14px;">Email</label><input class="form-control" id="login-modal-email-input" name="login" type="email" /></div><div class="form-group"><label class="control-label" for="login-modal-password-input" style="font-size: 14px;">Password</label><input class="form-control" id="login-modal-password-input" name="password" type="password" /></div><input type="hidden" name="post_login_redirect_url" id="post_login_redirect_url" value="https://independent.academia.edu/OlivierRosec" autocomplete="off" /><div class="checkbox"><label><input type="checkbox" name="remember_me" id="remember_me" value="1" checked="checked" /><small style="font-size: 12px; margin-top: 2px; display: inline-block;">Remember me on this 
computer</small></label></div><br><input type="submit" name="commit" value="Log In" class="btn btn-primary btn-block btn-lg js-login-submit" data-disable-with="Log In" /></form><script>typeof window?.Aedu?.recaptchaManagedForm === 'function' && window.Aedu.recaptchaManagedForm( document.querySelector('.js-login-form'), document.querySelector('.js-login-submit') );</script><small style="font-size: 12px;"><br />or <a data-target="#login-modal-reset-password-container" data-toggle="collapse" href="javascript:void(0)">reset password</a></small><div class="collapse" id="login-modal-reset-password-container"><br /><div class="well margin-0x"><form class="js-password-reset-form" action="https://www.academia.edu/reset_password" accept-charset="UTF-8" method="post"><input name="utf8" type="hidden" value="✓" autocomplete="off" /><input type="hidden" name="authenticity_token" value="3aCof+9bwysJtXVp5coeS+1WBltghTYNW1FQaoc0WvKwI7njQslMIAy/hKYTZ+U/gTX+cyrtRIlWKvOyERtAWw==" autocomplete="off" /><p>Enter the email address you signed up with and we'll email you a reset link.</p><div class="form-group"><input class="form-control" name="email" type="email" /></div><script src="https://recaptcha.net/recaptcha/api.js" async defer></script> <script> var invisibleRecaptchaSubmit = function () { var closestForm = function (ele) { var curEle = ele.parentNode; while (curEle.nodeName !== 'FORM' && curEle.nodeName !== 'BODY'){ curEle = curEle.parentNode; } return curEle.nodeName === 'FORM' ? 
curEle : null }; var eles = document.getElementsByClassName('g-recaptcha'); if (eles.length > 0) { var form = closestForm(eles[0]); if (form) { form.submit(); } } }; </script> <input type="submit" data-sitekey="6Lf3KHUUAAAAACggoMpmGJdQDtiyrjVlvGJ6BbAj" data-callback="invisibleRecaptchaSubmit" class="g-recaptcha btn btn-primary btn-block" value="Email me a link" /> </form></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/collapse-45805421cf446ca5adf7aaa1935b08a3a8d1d9a6cc5d91a62a2a3a00b20b3e6a.js"], function() { // from javascript_helper.rb $("#login-modal-reset-password-container").on("shown.bs.collapse", function() { $(this).find("input[type=email]").focus(); }); }); </script> </div></div></div><div class="modal-footer"><div class="text-center"><small style="font-size: 12px;">Need an account? <a rel="nofollow" href="https://www.academia.edu/signup">Click here to sign up</a></small></div></div></div></div></div></div><script>// If we are on subdomain or non-bootstrapped page, redirect to login page instead of showing modal (function(){ if (typeof $ === 'undefined') return; var host = window.location.hostname; if ((host === $domain || host === "www."+$domain) && (typeof $().modal === 'function')) { $("#nav_log_in").click(function(e) { // Don't follow the link and open the modal e.preventDefault(); $("#login-modal").on('shown.bs.modal', function() { $(this).find("#login-modal-email-input").focus() }).modal('show'); }); } })()</script> <div class="bootstrap" id="footer"><div class="footer-content clearfix text-center padding-top-7x" style="width:100%;"><ul class="footer-links-secondary footer-links-wide list-inline margin-bottom-1x"><li><a href="https://www.academia.edu/about">About</a></li><li><a href="https://www.academia.edu/press">Press</a></li><li><a rel="nofollow" href="https://medium.com/academia">Blog</a></li><li><a href="https://www.academia.edu/documents">Papers</a></li><li><a 
href="https://www.academia.edu/topics">Topics</a></li><li><a href="https://www.academia.edu/journals">Academia.edu Journals</a></li><li><a rel="nofollow" href="https://www.academia.edu/hiring"><svg style="width: 13px; height: 13px;" aria-hidden="true" focusable="false" data-prefix="fas" data-icon="briefcase" class="svg-inline--fa fa-briefcase fa-w-16" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512"><path fill="currentColor" d="M320 336c0 8.84-7.16 16-16 16h-96c-8.84 0-16-7.16-16-16v-48H0v144c0 25.6 22.4 48 48 48h416c25.6 0 48-22.4 48-48V288H320v48zm144-208h-80V80c0-25.6-22.4-48-48-48H176c-25.6 0-48 22.4-48 48v48H48c-25.6 0-48 22.4-48 48v80h512v-80c0-25.6-22.4-48-48-48zm-144 0H192V96h128v32z"></path></svg> <strong>We're Hiring!</strong></a></li><li><a rel="nofollow" href="https://support.academia.edu/"><svg style="width: 12px; height: 12px;" aria-hidden="true" focusable="false" data-prefix="fas" data-icon="question-circle" class="svg-inline--fa fa-question-circle fa-w-16" role="img" xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512"><path fill="currentColor" d="M504 256c0 136.997-111.043 248-248 248S8 392.997 8 256C8 119.083 119.043 8 256 8s248 111.083 248 248zM262.655 90c-54.497 0-89.255 22.957-116.549 63.758-3.536 5.286-2.353 12.415 2.715 16.258l34.699 26.31c5.205 3.947 12.621 3.008 16.665-2.122 17.864-22.658 30.113-35.797 57.303-35.797 20.429 0 45.698 13.148 45.698 32.958 0 14.976-12.363 22.667-32.534 33.976C247.128 238.528 216 254.941 216 296v4c0 6.627 5.373 12 12 12h56c6.627 0 12-5.373 12-12v-1.333c0-28.462 83.186-29.647 83.186-106.667 0-58.002-60.165-102-116.531-102zM256 338c-25.365 0-46 20.635-46 46 0 25.364 20.635 46 46 46s46-20.636 46-46c0-25.365-20.635-46-46-46z"></path></svg> <strong>Help Center</strong></a></li></ul><ul class="footer-links-tertiary list-inline margin-bottom-1x"><li class="small">Find new research papers in:</li><li class="small"><a href="https://www.academia.edu/Documents/in/Physics">Physics</a></li><li 
class="small"><a href="https://www.academia.edu/Documents/in/Chemistry">Chemistry</a></li><li class="small"><a href="https://www.academia.edu/Documents/in/Biology">Biology</a></li><li class="small"><a href="https://www.academia.edu/Documents/in/Health_Sciences">Health Sciences</a></li><li class="small"><a href="https://www.academia.edu/Documents/in/Ecology">Ecology</a></li><li class="small"><a href="https://www.academia.edu/Documents/in/Earth_Sciences">Earth Sciences</a></li><li class="small"><a href="https://www.academia.edu/Documents/in/Cognitive_Science">Cognitive Science</a></li><li class="small"><a href="https://www.academia.edu/Documents/in/Mathematics">Mathematics</a></li><li class="small"><a href="https://www.academia.edu/Documents/in/Computer_Science">Computer Science</a></li></ul></div></div><div class="DesignSystem" id="credit" style="width:100%;"><ul class="u-pl0x footer-links-legal list-inline"><li><a rel="nofollow" href="https://www.academia.edu/terms">Terms</a></li><li><a rel="nofollow" href="https://www.academia.edu/privacy">Privacy</a></li><li><a rel="nofollow" href="https://www.academia.edu/copyright">Copyright</a></li><li>Academia ©2024</li></ul></div><script> //<![CDATA[ window.detect_gmtoffset = true; window.Academia && window.Academia.set_gmtoffset && Academia.set_gmtoffset('/gmtoffset'); //]]> </script> <div id='overlay_background'></div> <div id='bootstrap-modal-container' class='bootstrap'></div> <div id='ds-modal-container' class='bootstrap DesignSystem'></div> <div id='full-screen-modal'></div> </div> </body> </html>