class="breadcrumb__lbl">Research</span> </a> </li> <li class="breadcrumb__item is-last is-active" > <span class="breadcrumb__lbl">Question Answering</span> </li> </ol> </div> </nav> <main id="main" class="site__body "> <div class="site__main"> <div id="c23847" class="content content--text content--layout-0 has-header frame--default content--bg-none"><div class="content__ct"><div class="content__wrap"><header class="content__hd"><h1 class="content__ttl "> Question Answering </h1></header><div class="content__bd"><h3>Research Group Leader: <a href="http://people.mpi-inf.mpg.de/~rsaharo/" target="_blank" rel="noreferrer">Rishiraj Saha Roy</a> (Mentor: <a href="https://people.mpi-inf.mpg.de/~weikum/" target="_blank" rel="noreferrer">Gerhard Weikum</a>)</h3></div></div></div></div> <div id="c13239" class="content content--text content--layout-0 has-header frame--bg-boxed content--bg-none"><div class="content__ct"><div class="content__wrap"><header class="content__hd"><h2 class="content__ttl "> Scope and Vision </h2></header><div class="content__bd"><p class="text-justify">Directly providing crisp responses to fact-based questions has become the de facto standard in Web search engines and digital assistants today. This saves users the trouble of browsing through one or more documents to locate the correct answers, or listening to long and verbose spoken responses. Our research designs mechanisms for enabling such direct answering over the Web by leveraging, but not being limited to, the power of large curated knowledge graphs. Our overarching goal is to provide <strong>robust</strong>, <strong>efficient</strong>, and <strong>interpretable</strong> solutions to the major tasks in this paradigm today: <strong>conversational</strong>, <strong>complex</strong>, and <strong>heterogeneous</strong> question answering. Read more <a href="https://link.springer.com/book/10.1007/978-3-031-79512-1" target="_blank" rel="noreferrer">here</a>. 
Explore our work [here](https://qa.mpi-inf.mpg.de/projects/).

Please find the details of our projects and their associated publications listed below.

**Project explorer:** https://qa.mpi-inf.mpg.de/projects/

**Book:** Rishiraj Saha Roy and Avishek Anand, [Question Answering for the Curated Web: Tasks and Methods in QA over Knowledge Bases and Text Collections](https://link.springer.com/book/10.1007/978-3-031-79512-1), Springer, 2022.

## Courses

| Name | Type | ECTS | Location | Semester |
|---|---|---|---|---|
| [Information Retrieval and Data Mining](https://www.mpi-inf.mpg.de/departments/databases-and-information-systems/teaching/ws1920/irdm19) | Core course | 9 | Saarland University | Winter 2019/20 |
| [Question Answering Systems](https://www.mpi-inf.mpg.de/question-answering-systems/) | Advanced course | 6 | Saarland University | Summer 2020 |
| [Selected Topics in Question Answering](https://www.mpi-inf.mpg.de/departments/databases-and-information-systems/teaching/ws2021/selected-topics-in-question-answering) | Seminar | 7 | Saarland University | Winter 2020/21 |

## FAITH: Faithful Temporal Question Answering over Heterogeneous Sources

Temporal question answering (QA) involves time constraints, with phrases such as "... in 2019" or "... before COVID". In the former, time is an explicit condition; in the latter, it is implicit. State-of-the-art methods have limitations along three dimensions. First, with neural inference, time constraints are merely soft-matched, leaving room for invalid or inexplicable answers. Second, questions with implicit time are poorly supported. Third, answers come from a single source: either a knowledge base (KB) or a text corpus. We propose a temporal QA system that addresses these shortcomings. First, it enforces temporal constraints for faithful answering with tangible evidence. Second, it properly handles implicit questions. Third, it operates over heterogeneous sources, covering KB, text, and Web tables in a unified manner. The method has three stages: (i) understanding the question and its temporal conditions, (ii) retrieving evidence from all sources, and (iii) faithfully answering the question. As implicit questions are sparse in prior benchmarks, we introduce a principled method for generating diverse questions.
Experiments show superior performance over a suite of baselines.

[Faithful Temporal Question Answering over Heterogeneous Sources](https://arxiv.org/abs/2402.15400), Zhen Jia, Philipp Christmann, and Gerhard Weikum, WWW 2024.
[[Website](https://faith.mpi-inf.mpg.de)] [[Preprint](https://arxiv.org/abs/2402.15400)] [[Code](https://github.com/zhenjia2017/FAITH)] [[Poster](https://qa.mpi-inf.mpg.de/faith/2024-www-poster-faith.pdf)]

TIQ: A Benchmark for Temporal Question Answering with Implicit Time Constraints, Zhen Jia, Philipp Christmann, and Gerhard Weikum, TempWeb@WWW 2024.
[[Website](https://faith.mpi-inf.mpg.de)] [[Code](https://github.com/zhenjia2017/TIQ)] [[Slides](https://qa.mpi-inf.mpg.de/tiq/2024-www-tiq-slides.pdf)]
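
To make the notion of faithful temporal answering concrete, here is a minimal Python sketch (not the FAITH implementation; class names, fields, and data are illustrative) of the key step: evidence is admitted only if its validity interval provably satisfies the question's explicit temporal constraint, instead of being soft-matched.

```python
from collections import Counter
from dataclasses import dataclass
from datetime import date

@dataclass
class Evidence:
    text: str          # verbalized snippet from the KB, a text corpus, or a Web table
    valid_from: date   # start of the time interval the evidence talks about
    valid_to: date     # end of that interval
    answer: str        # answer candidate supported by this evidence

def satisfies(ev: Evidence, constraint: str, pivot: date) -> bool:
    """Hard temporal check instead of soft matching: does the evidence interval
    match the question's condition ('before', 'after', 'in') w.r.t. the pivot?"""
    if constraint == "before":   # interval overlaps the period before the pivot
        return ev.valid_from < pivot
    if constraint == "after":    # interval overlaps the period after the pivot
        return ev.valid_to > pivot
    if constraint == "in":       # interval covers the pivot year
        return ev.valid_from.year <= pivot.year <= ev.valid_to.year
    return True

def faithful_answer(evidences, constraint, pivot):
    """Answer only from evidence that provably satisfies the temporal constraint."""
    admissible = [ev for ev in evidences if satisfies(ev, constraint, pivot)]
    if not admissible:
        return None, []
    best, _ = Counter(ev.answer for ev in admissible).most_common(1)[0]
    return best, [ev.text for ev in admissible if ev.answer == best]

# toy question: "Who coached FC Barcelona before 2019?" (data is made up)
pool = [Evidence("Valverde coached Barcelona 2017-2020", date(2017, 5, 29), date(2020, 1, 13), "Ernesto Valverde"),
        Evidence("Koeman coached Barcelona 2020-2021", date(2020, 8, 19), date(2021, 10, 28), "Ronald Koeman")]
print(faithful_answer(pool, "before", date(2019, 1, 1)))
```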
## REIGN: Robust Training for Conversational Question Answering Models with Reinforced Reformulation Generation

Models for conversational question answering (ConvQA) over knowledge graphs (KGs) are usually trained and tested on benchmarks of gold QA pairs. This implies that training is limited to surface forms seen in the respective datasets, and evaluation is on a small set of held-out questions. Through our proposed framework REIGN, we take several steps to remedy this restricted learning setup. First, we systematically generate reformulations of training questions to increase the robustness of models to surface-form variations. This is a particularly challenging problem, given the incomplete nature of such questions. Second, we guide ConvQA models towards higher performance by feeding them only those reformulations that help improve their answering quality, using deep reinforcement learning. Third, we demonstrate the viability of training major model components on one benchmark and applying them zero-shot to another. Finally, for a rigorous evaluation of the robustness of trained models, we use and release large numbers of diverse reformulations generated by prompting GPT for benchmark test sets (resulting in a 20x increase in size). Our findings show that ConvQA models trained robustly via reformulations significantly outperform those trained only on gold QA pairs.

[Robust Training for Conversational Question Answering Models with Reinforced Reformulation Generation](https://arxiv.org/abs/2310.13505#), Magdalena Kaiser, Rishiraj Saha Roy, and Gerhard Weikum, WSDM 2024.
[[Website](https://reign.mpi-inf.mpg.de)] [[Preprint](https://arxiv.org/abs/2310.13505#)] [[Code](https://github.com/magkai/REIGN)] [Slides] [Video]
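
The core training signal, rewarding only reformulations that improve answering quality, can be illustrated with a small sketch. The `qa_model` callable and `generate_reformulations` helper below are hypothetical stand-ins for the actual fine-tuned components; the reward is simply the gain in answer F1 over the original question.

```python
def answer_f1(pred: set, gold: set) -> float:
    """Set-based F1 between predicted and gold answers."""
    if not pred or not gold:
        return 0.0
    tp = len(pred & gold)
    if tp == 0:
        return 0.0
    prec, rec = tp / len(pred), tp / len(gold)
    return 2 * prec * rec / (prec + rec)

def helpful_reformulations(question, gold, qa_model, generate_reformulations):
    """Keep only reformulations whose reward (F1 gain over the original question)
    is positive; these can then be used to augment ConvQA training data."""
    base = answer_f1(qa_model(question), gold)
    kept = []
    for ref in generate_reformulations(question):
        reward = answer_f1(qa_model(ref), gold) - base
        if reward > 0:
            kept.append((ref, reward))
    return kept

# toy stand-ins for the hypothetical components
def toy_qa_model(q):      # pretends only one phrasing is answered correctly
    return {"Christopher Nolan"} if "direct" in q else set()

def toy_reformulator(q):  # e.g. paraphrases produced by a generation model
    return [q + " exactly", q.replace("who made", "who directed"), "huh?"]

print(helpful_reformulations("who made Tenet", {"Christopher Nolan"},
                             toy_qa_model, toy_reformulator))
```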
## EXPLAIGNN: Explainable Conversational Question Answering on Heterogeneous Sources

In conversational question answering (ConvQA), users express their information needs through a series of utterances with incomplete context and ad hoc style. Existing ConvQA methods typically rely on a single information source, like a curated knowledge base (KB), a text collection, or a set of Web tables, thereby reducing the overall answer recall. Further, none of them provide explanations that support the answer derivation process. We propose EXPLAIGNN, a method that overcomes these limitations by integrating information from a mixture of sources with user-comprehensible explanations for answers. Our technique constructs a heterogeneous graph from entities and evidence snippets retrieved from a KB, a text corpus, infoboxes, and Web tables. This large graph is then iteratively reduced via graph neural networks that incorporate question-level attention, until the best answers and their explanations are distilled. Comprehensive experiments show that EXPLAIGNN improves answering performance over state-of-the-art ConvQA baselines. A crowdsourced user study demonstrates that answers derived by the proposed framework are understandable by end users.

[Explainable Conversational Question Answering over Heterogeneous Sources via Iterative Graph Neural Networks](https://explaignn.mpi-inf.mpg.de), Philipp Christmann, Rishiraj Saha Roy, and Gerhard Weikum, SIGIR 2023.
[[Preprint](https://arxiv.org/abs/2305.01548)] [[Website](https://explaignn.mpi-inf.mpg.de)] [[Code](https://github.com/PhilippChr/EXPLAIGNN)] [[Slides](https://qa.mpi-inf.mpg.de/explaignn/explaignn-extended-slides.pdf)] [[Video](https://qa.mpi-inf.mpg.de/explaignn/explaignn-acm-video.mp4)] [[User study](https://qa.mpi-inf.mpg.de/explaignn/explaignn_user_study.zip)]
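
A toy sketch of the iterative graph-reduction idea follows; simple word-overlap scores stand in for the learned question-level attention and GNN message passing, and the node labels and shrink schedule are made up for illustration.

```python
import math
from collections import defaultdict

def score(text: str, question: str) -> float:
    """Toy question attention: word overlap between a node's label and the question."""
    q, t = set(question.lower().split()), set(text.lower().split())
    return len(q & t) / math.sqrt(len(t) + 1)

def iterative_reduction(nodes, edges, question, shrink=0.5, min_size=3):
    """Iteratively prune a heterogeneous graph of entities and evidence snippets:
    each round, a node's relevance is its own question score plus the mean score of
    its neighbours (a crude stand-in for one message-passing step), and only the
    top fraction of nodes survives."""
    nodes = dict(nodes)  # node id -> label text
    while len(nodes) > min_size:
        nbrs = defaultdict(list)
        for u, v in edges:
            if u in nodes and v in nodes:
                nbrs[u].append(v)
                nbrs[v].append(u)
        rel = {}
        for n, label in nodes.items():
            own = score(label, question)
            nb = [score(nodes[m], question) for m in nbrs[n]]
            rel[n] = own + (sum(nb) / len(nb) if nb else 0.0)
        keep = sorted(rel, key=rel.get, reverse=True)[:max(min_size, int(len(nodes) * shrink))]
        nodes = {n: nodes[n] for n in keep}
    return nodes  # surviving nodes: answer candidates plus their supporting evidence

# toy usage
nodes = {"e1": "Christopher Nolan", "e2": "Tenet 2020 film directed by Christopher Nolan",
         "e3": "Inception", "e4": "Warner Bros", "e5": "box office results table"}
edges = [("e1", "e2"), ("e2", "e3"), ("e2", "e4"), ("e4", "e5")]
print(iterative_reduction(nodes, edges, "who directed the film Tenet"))
```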
## CONVINSE: Conversational Question Answering on Heterogeneous Sources

Conversational question answering (ConvQA) tackles sequential information needs where the context of follow-up questions is left implicit. Current ConvQA systems operate over homogeneous sources of information: either a knowledge base (KB), or a text corpus, or a collection of tables. This project addresses the novel issue of jointly tapping into all of these together, thereby boosting answer coverage. We present CONVINSE, an end-to-end pipeline for ConvQA over heterogeneous sources, operating in three stages: (i) learning an explicit structured representation of an incoming question and its conversational context, (ii) harnessing this frame-like representation to uniformly capture relevant evidence from KB, text, and tables, and (iii) running a fusion-in-decoder model to generate the answer. We construct and release the first benchmark for ConvQA over heterogeneous sources, ConvMix, comprising 3000 real-user conversations with over 15000 questions, along with entity annotations, completed question utterances, and question paraphrases. Experiments demonstrate the viability and advantages of our method compared to state-of-the-art baselines.

[Conversational Question Answering on Heterogeneous Sources](https://dl.acm.org/doi/10.1145/3477495.3531815), Philipp Christmann, Rishiraj Saha Roy, and Gerhard Weikum, SIGIR 2022.
[[Preprint](https://arxiv.org/pdf/2204.11677.pdf)] [[Website](https://convinse.mpi-inf.mpg.de)] [[Benchmark](https://convinse.mpi-inf.mpg.de)] [[Code](https://github.com/PhilippChr/CONVINSE)] [[Slides](https://qa.mpi-inf.mpg.de/convinse/SIGIR2022-slides-convinse.pdf)] [[Poster](https://qa.mpi-inf.mpg.de/convinse/SIGIR2022-poster-convinse.pdf)] [[Video](https://qa.mpi-inf.mpg.de/convinse/convinse-extended-video.mp4)] [[ACM Badge](https://openreview.net/forum?id=B3Tel3kLxje&noteId=DSnwAi3Iz6w)]

[CompMix: A Benchmark for Heterogeneous Question Answering](http://arxiv.org/abs/2306.12235), Philipp Christmann, Rishiraj Saha Roy, and Gerhard Weikum, WWW 2024.
[[Preprint](http://arxiv.org/abs/2306.12235)] [[Website](https://qa.mpi-inf.mpg.de/compmix)] [[Benchmark](https://qa.mpi-inf.mpg.de/compmix/)] [[Slides](https://qa.mpi-inf.mpg.de/compmix/2024-www-compmix-slides.pdf)] [[Video](https://qa.mpi-inf.mpg.de/compmix/2024-www-compmix-promotional-video.mp4)]
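
The three-stage pipeline can be sketched as follows; the `StructuredRepresentation` fields and the stage functions are illustrative placeholders, not the actual CONVINSE interfaces or schema.

```python
from dataclasses import dataclass

@dataclass
class StructuredRepresentation:
    """Frame-like, intent-explicit representation of the current turn
    (field names are illustrative)."""
    context_entity: str = ""   # salient entity carried over from earlier turns
    question_entity: str = ""  # entity mentioned in the current utterance
    relation: str = ""         # relational phrase expressing the intent
    answer_type: str = ""      # expected answer type, e.g. "person", "date"

def answer_turn(utterance, history, derive_sr, retrieve_evidence, generate_answer):
    """Heterogeneous ConvQA pipeline: (i) derive a structured representation from
    the utterance and conversation history, (ii) use it to retrieve evidence
    uniformly from KB, text, and tables, (iii) fuse the evidence into an answer
    (in CONVINSE this last step is a fusion-in-decoder model)."""
    sr = retrieve = None
    sr = derive_sr(utterance, history)    # stage (i)
    evidence = retrieve_evidence(sr)      # stage (ii), all sources at once
    return generate_answer(sr, evidence)  # stage (iii)

# toy stand-ins to make the skeleton runnable
sr_fn = lambda u, h: StructuredRepresentation("Tenet", "", "directed by", "person")
ret_fn = lambda sr: ["KB: Tenet directed_by Christopher Nolan",
                     "Text: 'Tenet is a 2020 film by Christopher Nolan'"]
gen_fn = lambda sr, ev: "Christopher Nolan"
print(answer_turn("who directed it?", ["when was Tenet released?"], sr_fn, ret_fn, gen_fn))
```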
## CLOCQ: Search Space Reduction for Complex Question Answering over Knowledge Bases

Answering complex questions over knowledge bases (KB-QA) faces huge input data with billions of facts, involving millions of entities and thousands of predicates. For efficiency, QA systems first reduce the answer search space by identifying a set of facts that is likely to contain all answers and relevant cues. The most common technique for doing this is to apply named entity disambiguation (NED) systems to the question and retrieve KB facts for the disambiguated entities. This work presents CLOCQ, an efficient method that prunes irrelevant parts of the search space using KB-aware signals. CLOCQ uses a top-k query processor over score-ordered lists of KB items, combining signals about lexical matching, relevance to the question, coherence among candidate items, and connectivity in the KB graph. Experiments with two recent QA benchmarks for complex questions demonstrate the superiority of CLOCQ over state-of-the-art baselines with respect to answer presence, size of the search space, and runtimes.

[Beyond NED: Fast and Effective Search Space Reduction for Complex Question Answering over Knowledge Bases](https://dl.acm.org/doi/10.1145/3488560.3498488), Philipp Christmann, Rishiraj Saha Roy, and Gerhard Weikum, WSDM 2022.
[[Preprint](https://arxiv.org/abs/2108.08597)] [[Website](https://clocq.mpi-inf.mpg.de/)] [[Code](https://github.com/PhilippChr/CLOCQ)] [[Slides](http://people.mpi-inf.mpg.de/~rsaharo/wsdm22slides_pcrsrgw.pdf)] [[Poster](http://people.mpi-inf.mpg.de/~rsaharo/wsdm22poster_pcrsrgw.pdf)] [[Video](http://people.mpi-inf.mpg.de/~pchristm/clocq-extended-video.mp4)]

[CLOCQ: A Toolkit for Fast and Easy Access to Knowledge Bases](https://dl.gi.de/bitstream/handle/20.500.12116/40333/B5-5.pdf), Philipp Christmann, Rishiraj Saha Roy, and Gerhard Weikum, [BTW 2023](https://sites.google.com/view/btw-2023-tud).
[[Code](https://github.com/PhilippChr/CLOCQ)] [[Poster](https://qa.mpi-inf.mpg.de/clocq/btw-poster.pdf)] [[Slides](https://qa.mpi-inf.mpg.de/clocq/btw-slides.pdf)]

[Question Entity and Relation Linking to Knowledge Bases via CLOCQ](https://pure.mpg.de/rest/items/item_3492002/component/file_3492003/content), Philipp Christmann, Rishiraj Saha Roy, and Gerhard Weikum, [SMART@ISWC '22](https://smart-task.github.io/2022/).
[[Code](https://github.com/PhilippChr/CLOCQ-pruning-module/)] [[Slides](https://qa.mpi-inf.mpg.de/clocq/smart-slides.pdf)] [[Video](https://qa.mpi-inf.mpg.de/clocq/smart-video.mp4)]
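
Top-k processing over score-ordered lists follows the spirit of threshold-style algorithms. Below is a simplified, self-contained sketch (not CLOCQ's actual implementation) that aggregates per-signal scores and stops early once no unseen item can still enter the top-k; the item identifiers and scores are toy data.

```python
import heapq

def top_k_threshold(lists, k, weights=None):
    """Threshold-algorithm-style top-k over score-ordered lists. Each list carries
    one signal (e.g. lexical match, question relevance, coherence, connectivity)
    as [(item, score), ...] sorted by descending score; an item's aggregate score
    is the weighted sum of its scores across all lists."""
    m = len(lists)
    weights = weights or [1.0] * m
    lookup = [dict(lst) for lst in lists]  # random access by item

    def aggregate(item):
        return sum(w * lookup[i].get(item, 0.0) for i, w in enumerate(weights))

    seen, best = set(), []                 # best: min-heap of (score, item)
    for depth in range(max(len(lst) for lst in lists)):
        # sorted access: one new item per list per round
        for lst in lists:
            if depth < len(lst):
                item = lst[depth][0]
                if item not in seen:
                    seen.add(item)
                    heapq.heappush(best, (aggregate(item), item))
                    if len(best) > k:
                        heapq.heappop(best)
        # threshold: best possible aggregate score of any still-unseen item
        threshold = sum(w * (lst[depth][1] if depth < len(lst) else 0.0)
                        for w, lst in zip(weights, lists))
        if len(best) == k and best[0][0] >= threshold:
            break                          # no unseen item can enter the top-k
    return sorted(best, reverse=True)

# toy usage: rank KB items by two signals
lexical = [("Q25089", 0.9), ("Q3820", 0.7), ("Q937", 0.2)]
connectivity = [("Q3820", 0.8), ("Q25089", 0.5), ("Q937", 0.4)]
print(top_k_threshold([lexical, connectivity], k=2))
```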
## EXAQT: Complex Temporal Question Answering on Knowledge Graphs

Questions with temporal intent are a special class of practical importance, but have not received much attention in research. This project presents EXAQT, the first end-to-end system for answering complex temporal questions that have multiple entities and predicates, and associated temporal conditions. EXAQT answers natural language questions over KGs in two stages, one geared towards high recall, the other towards precision at top ranks. The first stage computes question-relevant compact subgraphs within the KG, and judiciously enhances them with pertinent temporal facts, both using fine-tuned BERT models. The second stage constructs relational graph convolutional networks (R-GCNs) from the first stage's output, and enhances the R-GCNs with time-aware entity embeddings and attention over temporal relations. We evaluate EXAQT on a large dataset of 16k temporal questions compiled from a variety of general-purpose KG-QA benchmarks. Results show that it outperforms three state-of-the-art systems for answering complex questions over KGs, thereby justifying specialized treatment of temporal QA.

[Complex Temporal Question Answering on Knowledge Graphs](https://dl.acm.org/doi/abs/10.1145/3459637.3482416), Zhen Jia, Soumajit Pramanik, Rishiraj Saha Roy, and Gerhard Weikum, CIKM 2021.
[[Preprint](https://arxiv.org/abs/2109.08935)] [[Data+Demo](https://exaqt.mpi-inf.mpg.de/)] [[Code](https://github.com/zhenjia2017/EXAQT)] [[Slides](http://qa.mpi-inf.mpg.de/rsaharo/cikm21slides_zjsprsrgw.pdf)] [[Poster](http://qa.mpi-inf.mpg.de/rsaharo/cikm21poster_zjsprsrgw.pdf)] [[Video](http://qa.mpi-inf.mpg.de/rsaharo/cikm21video_zjsprsrgw.mp4)]
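
A minimal sketch of the recall-oriented first stage, with plain lexical overlap standing in for the fine-tuned BERT scorers; the facts, temporal facts, and question are toy data, not from the EXAQT benchmark.

```python
def question_relevant_subgraph(kg_facts, temporal_facts, question, top_n=5):
    """Toy version of a recall-oriented first stage: score KG facts by lexical
    overlap with the question, keep the best ones, then enhance the subgraph with
    temporal facts about the entities it already contains."""
    q_terms = set(question.lower().split())

    def overlap(fact):
        return len(q_terms & set(" ".join(fact).lower().split()))

    core = sorted(kg_facts, key=overlap, reverse=True)[:top_n]
    entities = {e for s, p, o in core for e in (s, o)}
    # add pertinent temporal facts touching entities of the compact subgraph
    return core + [f for f in temporal_facts if f[0] in entities or f[2] in entities]

kg = [("Barack Obama", "position held", "President of the United States"),
      ("Barack Obama", "spouse", "Michelle Obama"),
      ("Angela Merkel", "position held", "Chancellor of Germany")]
temporal = [("Barack Obama", "position held start", "2009"),
            ("Barack Obama", "position held end", "2017")]
print(question_relevant_subgraph(kg, temporal, "when did Barack Obama become president", top_n=2))
```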
## CONQUER: Reinforcement Learning from Reformulations in Conversational Question Answering

Conversational question answering (ConvQA) is becoming popular for interaction with personal assistants. State-of-the-art methods for ConvQA over knowledge graphs can only learn from crisp question-answer pairs found in popular benchmarks. In reality, however, such training data is hard to come by: Web users would rarely mark answers explicitly as correct or wrong. In this project, we take a step towards a more natural learning paradigm: learning from noisy and implicit feedback via question reformulations. A reformulation is likely to be triggered by an incorrect system response, whereas a new follow-up information need could often be indicative of correctness in the previous turn. We present a reinforcement learning model, termed CONQUER (Conversational Question answering with Reformulations), that is naturally suitable for modeling a stream of such reformulations. CONQUER models the answering process as multiple agents walking in parallel on the knowledge graph, where the walks are determined by actions sampled using a policy network. This policy network takes the question along with the conversational context as input, and is trained via noisy rewards obtained from the reformulation likelihood. To evaluate CONQUER, we create and release ConvRef, a benchmark with about 11k natural conversations containing around 205k reformulations. Experiments show that CONQUER successfully learns to answer conversational questions from noisy reward signals, significantly improving over the state-of-the-art baseline CONVEX.

[Reinforcement Learning from Reformulations in Conversational Question Answering over Knowledge Graphs](https://dl.acm.org/doi/10.1145/3404835.3462859), Magdalena Kaiser, Rishiraj Saha Roy, and Gerhard Weikum, SIGIR 2021.
[[Preprint](https://arxiv.org/abs/2105.04850)] [[Data+Demo](https://conquer.mpi-inf.mpg.de/)] [[Code](https://github.com/magkai/CONQUER)] [[Slides](http://qa.mpi-inf.mpg.de/rsaharo/sigir21slides_mkrsrgw.pdf)] [[Video](http://qa.mpi-inf.mpg.de/rsaharo/sigir21video_mkrsrgw.mp4)] [[Poster](http://qa.mpi-inf.mpg.de/rsaharo/sigir21poster_mkrsrgw.pdf)] [[ACM Badge](https://openreview.net/forum?id=udB5kZIauoX)]
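
The learning signal can be illustrated with a tabular toy policy in place of CONQUER's neural policy network: an agent picks outgoing KG edges, and the weight of an action is increased or decreased depending on whether the next user utterance looks like a reformulation. The KG, reward scheme, and learning rate below are all illustrative.

```python
import math
import random
from collections import defaultdict

# toy KG: entity -> list of (predicate, target entity) actions
KG = {"Tenet": [("director", "Christopher Nolan"), ("cast member", "John David Washington")]}

weights = defaultdict(float)  # tabular stand-in for the policy network

def choose_action(entity):
    """Sample an outgoing KG edge with probability softmax(weight)."""
    actions = KG[entity]
    exps = [math.exp(weights[(entity, a)]) for a in actions]
    r = random.uniform(0, sum(exps))
    for a, e in zip(actions, exps):
        r -= e
        if r <= 0:
            return a
    return actions[-1]

def update(entity, action, is_reformulation, lr=0.5):
    """Noisy reward: a follow-up reformulation signals a wrong answer (-1),
    a genuinely new question signals a correct one (+1)."""
    reward = -1.0 if is_reformulation else 1.0
    weights[(entity, action)] += lr * reward

# simulated interaction: the user keeps reformulating until the director is returned
for _ in range(20):
    action = choose_action("Tenet")
    update("Tenet", action, is_reformulation=(action[1] != "Christopher Nolan"))
print(max(KG["Tenet"], key=lambda a: weights[("Tenet", a)]))
```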
## UNIQORN: Unified Question Answering over RDF Knowledge Graphs and Text Sources

Question answering over knowledge graphs and other RDF data has been greatly advanced, with a number of good systems providing crisp answers for natural language questions or telegraphic queries. Some of these systems incorporate textual sources as additional evidence for the answering process, but cannot compute answers that are present in text alone. Conversely, systems from the IR and NLP communities have addressed QA over text, but such systems barely utilize semantic data and knowledge. This paper presents the first QA system that can seamlessly operate over RDF datasets, text corpora, or both together, in a unified framework. Our method, called UNIQORN, builds a context graph on the fly by retrieving question-relevant triples from the RDF data and/or snippets from a text corpus, using a fine-tuned BERT model. The resulting graph is typically rich but highly noisy. UNIQORN copes with this input by advanced graph algorithms for Group Steiner Trees that identify the best answer candidates in the context graph. Experimental results on several benchmarks of complex questions with multiple entities and relations show that UNIQORN produces results comparable to the state-of-the-art on KGs, text corpora, and heterogeneous sources. The graph-based methodology provides user-interpretable evidence for the complete answering process.

UNIQORN: Unified Question Answering over RDF Knowledge Graphs and Natural Language Text, Soumajit Pramanik, Jesujoba Alabi, Rishiraj Saha Roy, and Gerhard Weikum, arXiv 2021.
[[Preprint](https://arxiv.org/abs/2108.08614)] [[Data+Demo](https://uniqorn.mpi-inf.mpg.de)] [[Code](https://github.com/ajesujoba/UNIQORN)] [[Poster](http://qa.mpi-inf.mpg.de/rsaharo/arxiv21poster_spjarsrgw.pdf)]
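
A rough sketch of the Group Steiner Tree idea on a tiny context graph: terminals are nodes matched by question terms, and the tree connecting them passes through the answer candidates. The heuristic below (a union of shortest paths) is far simpler than UNIQORN's actual algorithms, and the graph and question are made up.

```python
from collections import deque

def bfs_path(graph, src, dst):
    """Shortest path between two nodes in an unweighted, undirected graph."""
    prev, queue, seen = {src: None}, deque([src]), {src}
    while queue:
        u = queue.popleft()
        if u == dst:
            path = []
            while u is not None:
                path.append(u)
                u = prev[u]
            return path[::-1]
        for v in graph.get(u, []):
            if v not in seen:
                seen.add(v)
                prev[v] = u
                queue.append(v)
    return []

def steiner_tree_nodes(graph, terminals):
    """Cheap Steiner-tree heuristic: union of shortest paths from one terminal
    to all the others."""
    root, nodes = terminals[0], set(terminals)
    for t in terminals[1:]:
        nodes.update(bfs_path(graph, root, t))
    return nodes

# toy context graph built from retrieved triples/snippets (undirected adjacency)
graph = {"Christopher Nolan": ["directed"],
         "directed": ["Christopher Nolan", "Tenet"],
         "Tenet": ["directed", "release year"],
         "release year": ["Tenet", "2020"],
         "2020": ["release year"]}
# question: "Which film did Christopher Nolan direct in 2020?"
terminals = ["Christopher Nolan", "directed", "2020"]
tree = steiner_tree_nodes(graph, terminals)
print(tree - set(terminals))  # non-terminal tree nodes contain the answer candidate "Tenet"
```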
## Tutorial on Question Answering over Curated and Open Web Sources

The last few years have seen an explosion of research on the topic of automated question answering (QA), spanning the communities of information retrieval, natural language processing, and artificial intelligence. This tutorial covers the highlights of this very active period of growth for QA, to give the audience a grasp of the families of algorithms that are currently being used. We partition research contributions by the underlying source from which answers are retrieved: curated knowledge graphs, unstructured text, or hybrid corpora. We choose this dimension of partitioning as it is the most discriminative when it comes to algorithm design. Other key dimensions are covered within each sub-topic, like the complexity of questions addressed, and the degrees of explainability and interactivity introduced in the systems. We conclude the tutorial with the most promising emerging trends in the expanse of QA, which will help new entrants into this field make the best decisions to take the community forward. Much has changed in the community since the last tutorial on QA at SIGIR 2016, and we believe that this timely overview will benefit a large number of conference participants.

[Question Answering for the Curated Web: Tasks and Methods in QA over Knowledge Bases and Text Collections](https://link.springer.com/book/10.1007/978-3-031-79512-1), Rishiraj Saha Roy and Avishek Anand, Springer, 2022.

[Question Answering over Curated and Open Web Sources](https://dl.acm.org/doi/10.1145/3397271.3401421), Rishiraj Saha Roy and Avishek Anand, SIGIR 2020.
[[Website](https://www.avishekanand.com/talk/sigir20-tute/)] [[Preprint](https://arxiv.org/pdf/2004.11980.pdf)] [[Slides](http://qa.mpi-inf.mpg.de/rsaharo/sigir20slides_rsraa.pdf)] [[Video Part 1](http://qa.mpi-inf.mpg.de/rsaharo/sigir20video_rsr.mp4)] [Video Part 2]
CROWN was evaluated on TREC CAsT data, where it achieved above-median performance in a pool of neural methods.</p><div class="box"><p><strong><a href="https://dl.acm.org/doi/10.1145/3397271.3401399" target="_blank" rel="noreferrer">Conversational Question Answering over Passages by Leveraging Word Proximity Networks</a>, </strong>Magdalena Kaiser, Rishiraj Saha Roy, and Gerhard Weikum, SIGIR 2020.<br> [<a href="https://arxiv.org/pdf/2004.13117.pdf" target="_blank" rel="noreferrer">Preprint</a>] [<a href="https://crown.mpi-inf.mpg.de/" target="_blank" rel="noreferrer">Demo</a>] [<a href="https://github.com/magkai/CROWN" target="_blank" rel="noreferrer">Code</a>] [<a href="http://qa.mpi-inf.mpg.de/crownvideo.mp4" target="_blank" rel="noreferrer">Video</a>] </p></div><div class="box"><p><strong><a href="https://trec.nist.gov/pubs/trec28/papers/mpi-inf-d5.C.pdf" target="_blank" rel="noreferrer">CROWN: Conversational Passage Ranking by Reasoning over Word Networks,</a></strong> Magdalena Kaiser, Rishiraj Saha Roy, and Gerhard Weikum, TREC 2019.<br> [<a href="https://arxiv.org/pdf/1911.02850.pdf" target="_blank" rel="noreferrer">Preprint</a>] [<a href="http://qa.mpi-inf.mpg.de/rsaharo/trec19slides_mkrsrgw.pdf" target="_blank" rel="noreferrer">Slides</a>] [<a href="http://qa.mpi-inf.mpg.de/rsaharo/trec19poster_mkrsrgw.pdf" target="_blank" rel="noreferrer">Poster</a>] [<a href="http://people.mpi-inf.mpg.de/~rsaharo/trec19.bib" target="_blank" rel="noreferrer">BibTeX</a>]</p></div></div></div></div></div> <div id="c24117" class="content content--div content--layout-0 has-no-header frame--default content--bg-none"><div class="content__ct"><div class="content__wrap"><div class="content__bd"><hr class="ce-div" /></div></div></div></div> <div id="c15047" class="content content--text content--layout-0 has-header frame--default content--bg-none"><div class="content__ct"><div class="content__wrap"><header class="content__hd"><h2 class="content__ttl "> CONVEX: Conversational Question Answering over Knowledge Graphs </h2></header><div class="content__bd"><p class="text-justify">Fact-centric information needs are rarely one-shot; users typically ask follow-up questions to explore a topic. In such a conversational setting, the user's inputs are often incomplete, with entities or predicates left out, and ungrammatical phrases. This poses a huge challenge to question answering (QA) systems that typically rely on cues in full-fledged interrogative sentences. As a solution, in this project, we develop CONVEX: an unsupervised method that can answer incomplete questions over a knowledge graph (KG) by maintaining conversation context using entities and predicates seen so far and automatically inferring missing or ambiguous pieces for follow-up questions. The core of our method is a graph exploration algorithm that judiciously expands a frontier to find candidate answers for the current question. To evaluate CONVEX, we release ConvQuestions, a crowdsourced benchmark with 11,200 distinct conversations from five different domains. 
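The sketch below gives a minimal, hypothetical illustration of such frontier expansion over a toy knowledge graph; the graph, the lexical matching, and all names are assumptions and not the released CONVEX implementation.</p>
<pre><code>
# Illustrative sketch (assumed toy KG, not the CONVEX release): expand a
# frontier around context entities to find answer candidates for a follow-up.
TOY_KG = {
    "Breaking_Bad": [("cast_member", "Bryan_Cranston"),
                     ("genre", "Crime_drama")],
    "Bryan_Cranston": [("spouse", "Robin_Dearden")],
}

def expand_frontier(context_entities, hops=1):
    """Collect candidate facts reachable from the current context."""
    frontier, candidates = set(context_entities), []
    for _ in range(hops):
        next_frontier = set()
        for entity in frontier:
            for predicate, obj in TOY_KG.get(entity, []):
                candidates.append((entity, predicate, obj))
                next_frontier.add(obj)
        frontier = next_frontier
    return candidates

def answer_followup(question_terms, context_entities):
    """Rank candidates by naive lexical match between question and predicate."""
    def match(candidate):
        return sum(term in candidate[1] for term in question_terms)
    return sorted(expand_frontier(context_entities), key=match, reverse=True)

# Follow-up "who is his spouse?" after talking about Bryan Cranston:
print(answer_followup(["spouse"], {"Bryan_Cranston"})[0])
</code></pre><p class="text-justify">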
We show that CONVEX: (i) adds conversational support to any stand-alone QA system, and (ii) outperforms state-of-the-art baselines and question completion strategies.</p><div class="box"><p><strong><a href="https://dl.acm.org/citation.cfm?id=3358016" target="_blank" rel="noreferrer">Look before you Hop: Conversational Question Answering over Knowledge Graphs Using Judicious Context Expansion</a></strong>, Philipp Christmann, Rishiraj Saha Roy, Abdalghani Abujabal, Jyotsna Singh, and Gerhard Weikum, CIKM 2019.<br> [<a href="https://arxiv.org/pdf/1910.03262.pdf" target="_blank" rel="noreferrer">Preprint</a>] [<a href="https://convex.mpi-inf.mpg.de/" target="_blank" rel="noreferrer">Data+Demo</a>] [<a href="https://github.com/PhilippChr/CONVEX" target="_blank" rel="noreferrer">Code</a>] [<a href="http://qa.mpi-inf.mpg.de/rsaharo/cikm19slides_pcrsraajsgw.pdf" target="_blank" rel="noreferrer">Slides</a>] [<a href="http://qa.mpi-inf.mpg.de/rsaharo/cikm19poster_pcrsraajsgw.pdf" target="_blank" rel="noreferrer">Poster</a>] </p></div></div></div></div></div> <div id="c24119" class="content content--div content--layout-0 has-no-header frame--default content--bg-none"><div class="content__ct"><div class="content__wrap"><div class="content__bd"><hr class="ce-div" /></div></div></div></div> <div id="c15303" class="content content--text content--layout-0 has-header frame--default content--bg-none"><div class="content__ct"><div class="content__wrap"><header class="content__hd"><h2 class="content__ttl "> QUEST: Answering Complex Questions by Joining Multi-Document Evidence </h2></header><div class="content__bd"><p class="text-justify">Direct answering of questions that involve multiple entities and relations is a challenge for text-based QA. This problem is most pronounced when answers can be found only by joining evidence from multiple documents. Curated knowledge graphs (KGs) may yield good answers, but are limited by their inherent incompleteness and potential staleness. This project presents QUEST, a method that can answer complex questions directly from textual sources on-the-fly, by computing similarity joins over partial results from different documents. Our method is completely unsupervised, avoiding training-data bottlenecks and being able to cope with rapidly evolving ad hoc topics and formulation style in user questions. QUEST builds a noisy quasi KG with node and edge weights, consisting of dynamically retrieved entity names and relational phrases. It augments this graph with types and semantic alignments, and computes the best answers by an algorithm for Group Steiner Trees. 
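As a rough, hypothetical illustration of this pipeline, the sketch below connects question-relevant nodes of a toy quasi KG with NetworkX's ordinary Steiner-tree approximation; QUEST itself computes Group Steiner Trees over groups of matching nodes, so the snippet is only a simplified stand-in.</p>
<pre><code>
# Illustrative sketch (assumed toy data, not the QUEST release): a noisy
# quasi KG whose nodes are entity names and relational phrases, answered by
# connecting question-relevant nodes with a (plain, not group) Steiner tree.
import networkx as nx
from networkx.algorithms.approximation import steiner_tree

quasi_kg = nx.Graph()
# Edges carry confidence-derived costs (lower cost means stronger evidence).
quasi_kg.add_edge("Christopher_Nolan", "directed", weight=1.0)
quasi_kg.add_edge("directed", "Inception", weight=1.0)
quasi_kg.add_edge("Inception", "starred", weight=1.0)
quasi_kg.add_edge("starred", "Leonardo_DiCaprio", weight=1.0)
quasi_kg.add_edge("Christopher_Nolan", "born_in", weight=1.0)
quasi_kg.add_edge("born_in", "London", weight=1.0)

# One representative node per question "group" (QUEST joins whole groups).
terminals = ["Christopher_Nolan", "Leonardo_DiCaprio"]
tree = steiner_tree(quasi_kg, terminals, weight="weight")

# Non-terminal nodes on the connecting tree; entity nodes among them are
# the answer candidates (here, the film linking director and actor).
candidates = [n for n in tree.nodes if n not in terminals]
print(candidates)
</code></pre><p class="text-justify">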
We evaluate QUEST on benchmarks of complex questions, and show that it substantially outperforms state-of-the-art baselines.</p><div class="box"><p><strong><a href="https://dl.acm.org/authorize?N697030" target="_blank" rel="noreferrer">Answering Complex Questions by Joining Multi-Document Evidence with Quasi Knowledge Graphs</a></strong>, Xiaolu Lu, Soumajit Pramanik, Rishiraj Saha Roy, Abdalghani Abujabal, Yafang Wang, and Gerhard Weikum, SIGIR 2019.<br> [<a href="https://arxiv.org/abs/1908.00469" target="_blank" rel="noreferrer">Preprint</a>] [<a href="https://dl.acm.org/action/downloadSupplement?doi=10.1145%2F3331184.3331252&file=cite1-14h30-d1.mp4&download=true" target="_blank" rel="noreferrer">Video</a>] [<a href="http://qa.mpi-inf.mpg.de/rsaharo/sigir19slides_xlsprsraaywgw.pdf" target="_blank" rel="noreferrer">Slides</a>] [<a href="http://qa.mpi-inf.mpg.de/quest/quest.zip" target="_blank" rel="noreferrer">Code+Data</a>] [<a href="https://quest.mpi-inf.mpg.de/" target="_blank" rel="noreferrer">Demo</a>] [<a href="https://techcrunch.com/2019/07/31/amazon-develops-a-new-way-to-help-alexa-answer-complex-questions/?guccounter=1&guce_referrer_us=aHR0cHM6Ly9wZW9wbGUubXBpLWluZi5tcGcuZGUvfmFidWphYmFsLw&guce_referrer_cs=kF4ojawjASPuslcXx7gwTg" target="_blank" rel="noreferrer">TechCrunch</a>] [<a href="https://venturebeat.com/2019/07/31/amazons-ai-helps-find-answers-to-complex-questions/" target="_blank" rel="noreferrer">VentureBeat</a>] </p></div></div></div></div></div> <div id="c24121" class="content content--div content--layout-0 has-no-header frame--default content--bg-none"><div class="content__ct"><div class="content__wrap"><div class="content__bd"><hr class="ce-div" /></div></div></div></div> <div id="c14583" class="content content--text content--layout-0 has-header frame--default content--bg-none"><div class="content__ct"><div class="content__wrap"><header class="content__hd"><h2 class="content__ttl "> ComQA: A Community-sourced Dataset for Complex Factoid Question Answering </h2></header><div class="content__bd"><p class="text-justify">To bridge the gap between the capabilities of the state of the art in factoid question answering (QA) and what users ask, we need large datasets of real questions that capture the various phenomena of interest, and the associated diversity in formulation patterns. We introduce ComQA, a large dataset of real user questions that exhibit different challenging aspects such as compositionality, temporal reasoning, and comparisons. ComQA questions are selected from the WikiAnswers community QA platform, which typically contains questions that are not satisfactorily answerable by current search engines. Through a large crowdsourcing effort, we (i) extract factoid questions from the platform and group them into paraphrase clusters (such interrogative paraphrases have been shown to be very useful in developing robustness to syntactic variations), and (ii) annotate these question clusters with their answers from Wikipedia. ComQA contains 11,214 questions grouped into 4,834 paraphrase clusters. We describe this construction process in detail, highlighting measures taken to ensure high quality of the output. 
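The sketch below shows one hypothetical way such a paraphrase cluster could be represented and consumed; the field names are illustrative assumptions and may differ from the actual ComQA release format.</p>
<pre><code>
# Hypothetical sketch of a paraphrase cluster with a shared answer set;
# field names are illustrative and may differ from the actual ComQA files.
import json

cluster = {
    "cluster_id": "example-1",
    "questions": [
        "who voiced the unicorn in the last unicorn?",
        "which actress did the voice of the unicorn in the last unicorn?",
    ],
    "answers": ["https://en.wikipedia.org/wiki/Mia_Farrow"],
}

# All interrogative paraphrases in a cluster share the same gold answers,
# so evaluation can score a system per question or per cluster.
for question in cluster["questions"]:
    print(question, "->", cluster["answers"])

print(json.dumps(cluster, indent=2))
</code></pre><p class="text-justify">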
We also present an extensive analysis of our dataset, including the performance of state-of-the-art systems, which demonstrates how ComQA can effectively drive future research.</p><div class="box"><p><strong><a href="https://www.aclweb.org/anthology/N19-1027" target="_blank" rel="noreferrer">ComQA: A Community-sourced Dataset for Complex Factoid Question Answering with Paraphrase Clusters</a></strong>, Abdalghani Abujabal, Rishiraj Saha Roy, Mohamed Yahya, and Gerhard Weikum, NAACL-HLT 2019.<br> [<a href="http://qa.mpi-inf.mpg.de/comqa/" target="_blank" rel="noreferrer">Data</a>] [<a href="http://qa.mpi-inf.mpg.de/rsaharo/naacl19poster_aarsrmygw.pdf" target="_blank" rel="noreferrer">Poster</a>]</p></div></div></div></div></div> <div id="c24123" class="content content--div content--layout-0 has-no-header frame--default content--bg-none"><div class="content__ct"><div class="content__wrap"><div class="content__bd"><hr class="ce-div" /></div></div></div></div> <div id="c14579" class="content content--text content--layout-0 has-header frame--default content--bg-none"><div class="content__ct"><div class="content__wrap"><header class="content__hd"><h2 class="content__ttl "> TEQUILA: Temporal Question Answering over Knowledge Bases </h2></header><div class="content__bd"><p class="text-justify">Question answering over knowledge bases (KB-QA) poses challenges in handling complex questions that need to be decomposed into sub-questions. An important case, addressed in this project, is that of temporal questions, where cues for temporal relations need to be discovered and handled. We propose TEQUILA, an enabler method for temporal QA that can run on top of any KB-QA engine. TEQUILA has four stages. It detects if a question has temporal intent. It decomposes and rewrites the question into non-temporal sub-questions and temporal constraints. Answers to sub-questions are then retrieved from the underlying KB-QA engine. Finally, TEQUILA uses constraint reasoning on temporal intervals to compute final answers to the full question. 
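The sketch below walks through these four stages on a toy example; the regular expressions, the stand-in KB-QA engine, and the interval logic are simplified assumptions, not the released TEQUILA code.</p>
<pre><code>
# Illustrative pipeline sketch (assumed rules and helper names, not the
# TEQUILA release): decompose a temporal question, answer the non-temporal
# part with any KB-QA engine, then filter by interval constraint reasoning.
import re

SIGNAL = re.compile(r"\b(before|after|during|in \d{4})\b")

def has_temporal_intent(question):
    return bool(SIGNAL.search(question))

def decompose(question):
    """Split into a non-temporal sub-question and a temporal constraint."""
    match = SIGNAL.search(question)
    return question[: match.start()].strip(" ,?"), question[match.start():].strip("?")

def underlying_kbqa(sub_question):
    """Stand-in for any KB-QA engine: returns candidates with time intervals."""
    return [("Obama", (2009, 2017)), ("Trump", (2017, 2021))]

def satisfies(interval, constraint):
    start, end = interval
    year = int(re.search(r"\d{4}", constraint).group())
    if constraint.startswith("before"):
        return end <= year
    if constraint.startswith("after"):
        return start >= year
    return start <= year <= end   # "during" / "in YEAR"

question = "who was US president before 2017?"
if has_temporal_intent(question):
    sub_q, constraint = decompose(question)
    answers = [a for a, span in underlying_kbqa(sub_q) if satisfies(span, constraint)]
    print(answers)  # ['Obama'] under these toy facts
</code></pre><p class="text-justify">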
Comparisons against state-of-the-art baselines show the viability of our method.</p><div class="box"><p><strong><a href="https://dl.acm.org/doi/10.1145/3269206.3269247" target="_blank" rel="noreferrer">TEQUILA: Temporal Question Answering over Knowledge Bases</a></strong>, Zhen Jia, Abdalghani Abujabal, Rishiraj Saha Roy, Jannik Strötgen, and Gerhard Weikum, CIKM 2018.<br> [<a href="https://arxiv.org/abs/1908.03650" target="_blank" rel="noreferrer">Preprint</a>] [<a href="http://qa.mpi-inf.mpg.de/rsaharo/cikm18poster_zjaarsrjsgw.pdf" target="_blank" rel="noreferrer">Poster</a>] [<a href="http://qa.mpi-inf.mpg.de/TempQuestions.zip" target="_blank" rel="noreferrer">Data</a>] [<a href="https://tequila.mpi-inf.mpg.de/" target="_blank" rel="noreferrer">Demo</a>] [<a href="https://github.com/zhenjia2017/tequila" target="_blank" rel="noreferrer">Code</a>] </p></div><div class="box"><p><strong><a href="https://dl.acm.org/citation.cfm?id=3191536" target="_blank" rel="noreferrer">TempQuestions: A Benchmark for Temporal Question Answering</a></strong>, Zhen Jia, Abdalghani Abujabal, Rishiraj Saha Roy, Jannik Strötgen, and Gerhard Weikum, HQA 2018 (WWW Workshop).<br> [<a href="http://qa.mpi-inf.mpg.de/rsaharo/hqa18slides_zjaarsrjsgw.pdf" target="_blank" rel="noreferrer">Slides</a>] [<a href="http://qa.mpi-inf.mpg.de/TempQuestions.zip" target="_blank" rel="noreferrer">Data</a>]</p></div></div></div></div></div> <div id="c24125" class="content content--div content--layout-0 has-no-header frame--default content--bg-none"><div class="content__ct"><div class="content__wrap"><div class="content__bd"><hr class="ce-div" /></div></div></div></div> <div id="c13231" class="content content--text content--layout-0 has-header frame--default content--bg-none"><div class="content__ct"><div class="content__wrap"><header class="content__hd"><h2 class="content__ttl "> NEQA: Never-Ending Learning for Open-Domain Question Answering over Knowledge Bases </h2></header><div class="content__bd"><p class="text-justify">Translating natural language questions to semantic representations such as SPARQL is a core challenge in open-domain question answering over knowledge bases (KB-QA). Existing methods rely on a clear separation between an offline training phase, where a model is learned, and an online phase where this model is deployed. Two major shortcomings of such methods are that (i) they require access to a large annotated training set that is not always readily available and (ii) they fail on questions from previously unseen domains. To overcome these limitations, this project presents NEQA, a continuous learning paradigm for KB-QA. Offline, NEQA automatically learns templates mapping syntactic structures to semantic ones from a small number of training question-answer pairs. Once deployed, continuous learning is triggered on cases where templates are insufficient. Using a semantic similarity function between questions and by judicious invocation of non-expert user feedback, NEQA learns new templates that capture previously unseen syntactic structures. This way, NEQA gradually extends its template repository. NEQA periodically re-trains its underlying models, allowing it to adapt to the language used after deployment. 
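The sketch below caricatures this never-ending loop with a toy template repository; the pattern abstraction, similarity function, and simulated user feedback are illustrative assumptions rather than the actual NEQA implementation.</p>
<pre><code>
# Illustrative sketch (assumed structures, not the NEQA release): a growing
# repository of utterance-to-query templates, extended at deployment time
# when no existing template covers an incoming question.
from difflib import SequenceMatcher

# Each template maps a coarse syntactic pattern to a query pattern.
templates = {
    "who VERB the ENTITY": "SELECT ?x WHERE { ENTITY :REL ?x }",
}

def syntactic_pattern(question):
    """Toy stand-in for a parser that abstracts entities and verbs."""
    words = question.lower().strip("?").split()
    mapping = {"composed": "VERB", "wrote": "VERB",
               "moonlight_sonata": "ENTITY", "hamlet": "ENTITY"}
    return " ".join(mapping.get(w, w) for w in words)

def answer(question):
    pattern = syntactic_pattern(question)
    if pattern in templates:
        return templates[pattern]
    # Continuous learning: find the most similar known pattern and, after
    # (simulated) non-expert user confirmation, store a new template.
    best = max(templates, key=lambda t: SequenceMatcher(None, t, pattern).ratio())
    templates[pattern] = templates[best]   # user feedback would verify this step
    return templates[pattern]

print(answer("who composed the Moonlight_Sonata?"))  # covered by seed template
print(answer("who wrote Hamlet?"))                    # triggers template learning
</code></pre><p class="text-justify">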
Our experiments demonstrate NEQA's viability, with steady improvement in answering quality over time, and the ability to answer questions from new domains.</p><div class="box"><p><strong><a href="https://dl.acm.org/doi/10.1145/3178876.3186004" target="_blank" rel="noreferrer">Never-Ending Learning for Open-Domain Question Answering over Knowledge Bases</a></strong>, Abdalghani Abujabal, Rishiraj Saha Roy, Mohamed Yahya, and Gerhard Weikum, WWW 2018.<br> [<a href="http://people.mpi-inf.mpg.de/~abujabal/publications/neqa/neqa.pdf" target="_blank" rel="noreferrer">Slides</a>] [<a href="http://qa.mpi-inf.mpg.de/rsaharo/neqa-wq-templates.zip" target="_blank" rel="noreferrer">Templates</a>] [<a href="https://www.techatbloomberg.com/blog/bloomberg-researchers-giorgio-stefanoni-and-mohamed-yahya-to-present-two-papers-at-the-web-conference-in-lyon/" target="_blank" rel="noreferrer">Tech@Bloomberg</a>] </p></div></div></div></div></div> <div id="c24127" class="content content--div content--layout-0 has-no-header frame--default content--bg-none"><div class="content__ct"><div class="content__wrap"><div class="content__bd"><hr class="ce-div" /></div></div></div></div> <div id="c14581" class="content content--text content--layout-0 has-header frame--default content--bg-none"><div class="content__ct"><div class="content__wrap"><header class="content__hd"><h2 class="content__ttl "> TIPI: Answer Type Prediction for Answering Compositional Questions </h2></header><div class="content__bd"><p class="text-justify">This project investigates the problem of answering compositional factoid questions over knowledge bases (KB) under efficiency constraints. The method, called TIPI, (i) decomposes compositional questions, (ii) predicts answer types for individual sub-questions, (iii) reasons over the compatibility of joint types, and finally, (iv) formulates compositional SPARQL queries respecting type constraints. TIPI's answer type predictor is trained using distant supervision, and exploits lexical, syntactic and embedding-based features to compute context- and hierarchy-aware candidate answer types for an input question. 
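The sketch below illustrates steps (ii) to (iv) on a toy example; the cue lexicon, type hierarchy, and SPARQL vocabulary are assumptions for illustration and not taken from the TIPI system.</p>
<pre><code>
# Illustrative sketch (assumed lexicons and type hierarchy, not the TIPI
# release): predict answer types for sub-questions, reason over their
# compatibility, and emit one type-constrained SPARQL query.
TYPE_HIERARCHY = {"actor": "person", "politician": "person", "person": "agent"}

def predict_type(sub_question):
    """Toy answer-type predictor driven by lexical cues."""
    cues = {"acted": "actor", "governed": "politician", "who": "person"}
    for cue, answer_type in cues.items():
        if cue in sub_question:
            return answer_type
    return "thing"

def generalizations(answer_type):
    chain = [answer_type]
    while chain[-1] in TYPE_HIERARCHY:
        chain.append(TYPE_HIERARCHY[chain[-1]])
    return chain

sub_questions = ["who acted in Inception", "who governed California"]
types = [predict_type(q) for q in sub_questions]

# Joint types are compatible if they share a common generalization; the most
# specific shared type becomes the constraint in the composed SPARQL query.
shared = [t for t in generalizations(types[0]) if t in generalizations(types[1])]
if shared:
    query = ("SELECT ?x WHERE { ?x :actedIn :Inception . "
             "?x :governed :California . ?x a :%s }" % shared[0])
else:
    query = None  # incompatible types: answer the sub-questions separately
print(types, shared, query)
</code></pre><p class="text-justify">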
Experiments on a recent benchmark show that TIPI results in state-of-the-art performance under the real-world assumption that only a single SPARQL query can be executed over the KB, and a substantial reduction in the number of queries in the more general case.</p><div class="box"><p><strong><a href="http://www.aclweb.org/anthology/I17-2038" target="_blank" rel="noreferrer">Efficiency-aware Answering of Compositional Questions using Answer Type Prediction</a></strong>, David Ziegler, Abdalghani Abujabal, Rishiraj Saha Roy, and Gerhard Weikum, IJCNLP 2017.<br> [<a href="http://qa.mpi-inf.mpg.de/rsaharo/ijcnlp17poster_dzaarsrgw.pdf" target="_blank" rel="noreferrer">Poster</a>]</p></div></div></div></div></div> <div id="c24135" class="content content--div content--layout-0 has-no-header frame--default content--bg-none"><div class="content__ct"><div class="content__wrap"><div class="content__bd"><hr class="ce-div" /></div></div></div></div> <div id="c13227" class="content content--text content--layout-0 has-header frame--default content--bg-none"><div class="content__ct"><div class="content__wrap"><header class="content__hd"><h2 class="content__ttl "> QUINT: Automated Template Generation for Question Answering over Knowledge Graphs </h2></header><div class="content__bd"><p class="text-justify">Templates are an important asset for question answering over knowledge graphs, simplifying the semantic parsing of input utterances and generating structured queries for interpretable answers. State-of-the-art methods rely on hand-crafted templates with limited coverage. This project presents QUINT, a system that automatically learns utterance-query templates solely from user questions paired with their answers. Additionally, QUINT is able to harness language compositionality for answering complex questions without having any templates for the entire question. 
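The sketch below shows, in a deliberately simplified and hypothetical form, how a single question-answer pair could induce an utterance-query template that then answers a structurally similar question; the toy KG and the naive alignment are assumptions, not QUINT's actual learning procedure.</p>
<pre><code>
# Illustrative sketch (assumed toy KG and naive alignment, not the QUINT
# release): learn an utterance-query template from one question-answer pair,
# then reuse it for a new question with the same syntactic shape.
TOY_KG = {("Titanic", "directedBy"): "James_Cameron",
          ("Inception", "directedBy"): "Christopher_Nolan"}

def learn_template(question, entity, answer):
    """Abstract the entity away; remember the predicate that led to the answer."""
    predicate = next(p for (s, p), o in TOY_KG.items() if s == entity and o == answer)
    utterance_template = question.replace(entity, "ENTITY")
    query_template = "SELECT ?x WHERE { ENTITY :%s ?x }" % predicate
    return utterance_template, query_template

def apply_template(question, entity, templates):
    utterance_template, query_template = templates
    if question.replace(entity, "ENTITY") == utterance_template:
        return query_template.replace("ENTITY", ":" + entity)
    return None

templates = learn_template("who directed Titanic?", "Titanic", "James_Cameron")
print(templates)
print(apply_template("who directed Inception?", "Inception", templates))
</code></pre><p class="text-justify">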
Experiments with different benchmarks demonstrate the high quality of QUINT.</p><div class="box"><p><strong><a href="https://dl.acm.org/doi/10.1145/3038912.3052583" target="_blank" rel="noreferrer">Automated Template Generation for Question Answering over Knowledge Graphs</a></strong>, Abdalghani Abujabal, Mohamed Yahya, Mirek Riedewald, and Gerhard Weikum, WWW 2017.<br> [<a href="https://dl.acm.org/doi/10.1145/3038912.3052583" target="_blank" rel="noreferrer">Slides</a>] [<a href="http://qa.mpi-inf.mpg.de/complex-questions-wikianswers-150.json" target="_blank" rel="noreferrer">Data</a>]</p></div><div class="box"><p><strong><a href="http://aclweb.org/anthology/D17-2011" target="_blank" rel="noreferrer">QUINT: Interpretable Question Answering over Knowledge Bases</a></strong>, Abdalghani Abujabal, Rishiraj Saha Roy, Mohamed Yahya, and Gerhard Weikum, EMNLP 2017.<br> [<a href="https://quint.mpi-inf.mpg.de/" target="_blank" rel="noreferrer">Demo</a>] [<a href="http://people.mpi-inf.mpg.de/~abujabal/publications/quint/emnlp-2017-demo-poster.pdf" target="_blank" rel="noreferrer">Poster</a>] </p></div></div></div></div></div> <div id="c24129" class="content content--div content--layout-0 has-no-header frame--default content--bg-none"><div class="content__ct"><div class="content__wrap"><div class="content__bd"><hr class="ce-div" /></div></div></div></div> <div id="c14599" class="content content--text content--layout-0 has-header frame--default content--bg-none"><div class="content__ct"><div class="content__wrap"><header class="content__hd"><h2 class="content__ttl "> TriniT: Relationship Queries on Extended Knowledge Graphs </h2></header><div class="content__bd"><p class="text-justify">Entity search over text corpora is not geared for relationship queries where answers are tuples of related entities and where a query often requires joining cues from multiple documents. With large knowledge graphs, structured querying on their relational facts is an alternative, but often suffers from poor recall because of mismatches between user queries and the knowledge graph or because of weakly populated relations. This project presents the TriniT search engine for querying and ranking on extended knowledge graphs that combine relational facts with textual web contents. Our query language is designed on the paradigm of SPO triple patterns, but is more expressive, supporting textual phrases for each of the SPO arguments. We present a model for automatic query relaxation to compensate for mismatches between the data and a user's query. Query answers - tuples of entities - are ranked by a statistical language model. 
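The sketch below is a hypothetical miniature of this data model: triple patterns whose arguments may be KG items or quoted text phrases, together with a crude relaxation step; all structures and scores are illustrative assumptions, not the TriniT engine.</p>
<pre><code>
# Illustrative sketch (assumed data model, not the TriniT system): extended
# SPO triple patterns where each argument may be a KG item or a text phrase,
# plus a simple relaxation step when the strict query returns nothing.
FACTS = [
    {"s": "Albert_Einstein", "p": "hasWonPrize", "o": "Nobel_Prize_in_Physics",
     "text": "awarded for his discovery of the photoelectric effect"},
    {"s": "Albert_Einstein", "p": "bornIn", "o": "Ulm",
     "text": "born in the German Empire"},
]

def matches(fact, pattern):
    """A pattern slot matches a KG item exactly or a quoted phrase textually."""
    for slot in ("s", "p", "o"):
        value = pattern.get(slot)
        if value is None:
            continue
        if value.startswith('"'):
            if value.strip('"') not in fact["text"]:
                return False
        elif value != fact[slot]:
            return False
    return True

def evaluate(patterns, relax=True):
    results = [f for f in FACTS if all(matches(f, p) for p in patterns)]
    if results or not relax:
        return results
    # Relaxation: replace the object slot by a wildcard and retry once.
    relaxed = [dict(p, o=None) for p in patterns]
    return evaluate(relaxed, relax=False)

# The strict query mismatches the KG (wrong prize), so relaxation kicks in:
strict = [{"s": "Albert_Einstein", "p": "hasWonPrize", "o": "Nobel_Prize_in_Chemistry"}]
print(evaluate(strict))

# Quoted phrases match the evidence text attached to a fact:
textual = [{"s": "Albert_Einstein", "o": '"photoelectric effect"'}]
print(evaluate(textual))
</code></pre><p class="text-justify">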
We present experiments with different benchmarks, including complex relationship queries, over a combination of the YAGO knowledge graph and the entity-annotated ClueWeb09 corpus.</p><div class="box"><p><strong><a href="https://dl.acm.org/citation.cfm?id=2835795" target="_blank" rel="noreferrer">Relationship Queries on Extended Knowledge Graphs</a></strong>, Mohamed Yahya, Denilson Barbosa, Klaus Berberich, Qiuyue Wang, and Gerhard Weikum, WSDM 2016.</p></div></div></div></div></div> <div id="c24133" class="content content--div content--layout-0 has-no-header frame--default content--bg-none"><div class="content__ct"><div class="content__wrap"><div class="content__bd"><hr class="ce-div" /></div></div></div></div> <div id="c14585" class="content content--text content--layout-0 has-header frame--default content--bg-none"><div class="content__ct"><div class="content__wrap"><header class="content__hd"><h2 class="content__ttl "> DEANNA: Robust Question Answering over the Web of Linked Data </h2></header><div class="content__bd"><p class="text-justify">Knowledge bases and the Web of Linked Data have become important assets for search, recommendation, and analytics. Natural-language questions are a user-friendly mode of tapping this wealth of knowledge and data. However, question answering technology does not work robustly in this setting as questions have to be translated into structured queries and users have to be careful in phrasing their questions. This project advocates a new approach that allows questions to be partially translated into relaxed queries, covering the essential but not necessarily all aspects of the user's input. To compensate for the omissions, we exploit textual sources associated with entities and relational facts. Our system translates user questions into an extended form of structured SPARQL queries, with text predicates attached to triple patterns. Our solution is based on a novel optimization model, cast into an integer linear program, for joint decomposition and disambiguation of the user question. 
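The sketch below mimics the spirit of that joint objective in a hypothetical, brute-force way (the actual system solves an integer linear program); the candidate mappings and scores are made-up assumptions for illustration.</p>
<pre><code>
# Illustrative sketch (assumed scores, not the DEANNA implementation): the
# project casts joint decomposition and disambiguation as an integer linear
# program; this toy version brute-forces a similar objective, picking one KG
# mapping per question phrase to maximize similarity plus coherence.
from itertools import product

CANDIDATES = {
    "played": ["actedIn", "playedInstrument"],
    "Casablanca": ["Casablanca_(film)", "Casablanca_(city)"],
}
SIMILARITY = {"actedIn": 0.6, "playedInstrument": 0.5,
              "Casablanca_(film)": 0.7, "Casablanca_(city)": 0.7}
COHERENCE = {("actedIn", "Casablanca_(film)"): 0.9}   # co-occur in the KG

def joint_score(assignment):
    local = sum(SIMILARITY[c] for c in assignment)
    pairwise = sum(COHERENCE.get((a, b), 0.0)
                   for a in assignment for b in assignment)
    return local + pairwise

phrases = list(CANDIDATES)
best = max(product(*(CANDIDATES[p] for p in phrases)), key=joint_score)
print(dict(zip(phrases, best)))
# {'played': 'actedIn', 'Casablanca': 'Casablanca_(film)'} under these scores
</code></pre><p class="text-justify">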
We demonstrate the quality of our methods through experiments with the QALD benchmark.</p><div class="box"><p><strong><a href="https://dl.acm.org/citation.cfm?id=2505677" target="_blank" rel="noreferrer">Robust Question Answering over the Web of Linked Data</a></strong>, Mohamed Yahya, Klaus Berberich, Shady Elbassuoni, and Gerhard Weikum, CIKM 2013.</p></div><div class="box"><p><strong><a href="https://www.aclweb.org/anthology/D12-1035.pdf" target="_blank" rel="noreferrer">Natural Language Questions for the Web of Data</a></strong>, Mohamed Yahya, Klaus Berberich, Shady Elbassuoni, Maya Ramanath, Volker Tresp, and Gerhard Weikum, EMNLP 2012.</p></div></div></div></div></div> <div id="c24131" class="content content--div content--layout-0 has-no-header frame--default content--bg-none"><div class="content__ct"><div class="content__wrap"><div class="content__bd"><hr class="ce-div" /></div></div></div></div> <div id="c15063" class="content content--list content--layout-0 has-header frame--default frame-space-after-box-wo-bo-di content--bg-none"><div class="content__ct"><div class="content__wrap"><header class="content__hd"><h2 class="content__ttl "> D5 Members </h2></header><div class="content__bd"><div class="tx-ttaddress"><div class="tt_address_list dm-mpiSimple"><div class="box_ttaddress tt_address_simple"><ul class="autocolumns"><li class="name"><a class="url" href="https://people.mpi-inf.mpg.de/~mkaiser"> Kaiser, Magdalena </a></li><li class="name"><a class="url" href="https://people.mpi-inf.mpg.de/~pchristm"> Christmann, Philipp </a></li><li class="name"><a class="url" href="https://people.mpi-inf.mpg.de/~weikum"> Weikum, Gerhard </a></li></ul></div></div></div></div></div><div class="frame-space-after-box-wo-bo-di"></div></div></div> <div id="c24137" class="content content--div content--layout-0 has-no-header frame--default content--bg-none"><div class="content__ct"><div class="content__wrap"><div class="content__bd"><hr class="ce-div" /></div></div></div></div> <div id="c13217" class="content content--text content--layout-0 has-header frame--default content--bg-none"><div class="content__ct"><div class="content__wrap"><header class="content__hd"><h2 class="content__ttl "> External Collaborators </h2></header><div class="content__bd"><ul><li>Zhen Jia, Southwest Jiaotong University, China</li><li>Soumajit Pramanik, IIT Bhilai, India</li><li>Abdalghani Abujabal, Amazon Alexa, Germany</li><li>Xiaolu Lu, Microsoft, Australia</li><li>Jannik Strötgen, Bosch Center for AI, Germany</li><li>Yafang Wang, Ant Financial Services Group, China</li><li>Mohamed Yahya, Bloomberg, UK</li><li>Mirek Riedewald, Northeastern University, USA</li></ul></div></div></div></div> <div id="c24139" class="content content--div content--layout-0 has-no-header frame--default content--bg-none"><div class="content__ct"><div class="content__wrap"><div class="content__bd"><hr class="ce-div" /></div></div></div></div> <div id="c13235" class="content content--text content--layout-0 has-header frame--default content--bg-none"><div class="content__ct"><div class="content__wrap"><header class="content__hd"><h2 class="content__ttl "> Resources </h2></header><div class="content__bd"><ul><li><a href="https://faith.mpi-inf.mpg.de" target="_blank" rel="noreferrer">TIQ</a>: A benchmark for temporal question answering with implicit time constraints (KG+Text+Table+Infobox) [WWW 2024]</li><li><a href="https://qa.mpi-inf.mpg.de/compmix/" target="_blank" rel="noreferrer">CompMix</a>: A benchmark of complex QA over heterogeneous sources 
(KG+Text+Table+Infobox) [WWW 2024]</li><li><a href="https://convinse.mpi-inf.mpg.de" target="_blank" rel="noreferrer">ConvMix</a>: A benchmark of conversational QA over heterogeneous sources (KG+Text+Table+Infobox) [SIGIR 2022]</li><li><a href="https://exaqt.mpi-inf.mpg.de/" target="_blank" rel="noreferrer">TimeQuestions</a>: A benchmark of complex temporal questions collated from 8 general purpose KB-QA datasets [CIKM 2021]</li><li><a href="https://conquer.mpi-inf.mpg.de/" target="_blank" rel="noreferrer">ConvRef</a>: A benchmark with reformulations by real users for conversational question-answering [SIGIR 2021]</li><li><a href="https://convex.mpi-inf.mpg.de/" target="_blank" rel="noreferrer">ConvQuestions</a>: A benchmark for conversational question-answering over knowledge graphs from five domains [CIKM 2019]</li><li><a href="http://qa.mpi-inf.mpg.de/comqa/" target="_blank" rel="noreferrer">ComQA</a>: A benchmark of real complex questions with interrogative paraphrases [NAACL-HLT 2019]</li><li><a href="https://exaqt.mpi-inf.mpg.de/" target="_blank" rel="noreferrer">TempQuestions</a>: A benchmark of temporal questions collated from multiple question-answering benchmarks [CIKM 2018]</li><li><a href="http://qa.mpi-inf.mpg.de/complex-questions-wikianswers-150.json" target="_blank" rel="noreferrer">ComplexQuestions</a>: A benchmark of real questions with multiple entities and relations [WWW 2017]</li></ul></div></div></div></div> </div> </main> <!--TYPO3SEARCH_end-->
</body> </html>