Fabio Cuzzolin | Oxford Brookes University - Academia.edu
Fabio Cuzzolin was born in Jesolo, Italy. He graduated magna cum laude from the University of Padua, where he was awarded a Ph.D. in 2001 for a thesis entitled "Visions of a generalized probability theory", and has worked at world-class institutions such as Washington University in St.
Louis, Politecnico di Milano, and the University of California, Los Angeles. In 2006 he was awarded a Marie Curie Fellowship at INRIA Rhône-Alpes, France, and in 2007 he was ranked second there in the national Senior Researcher recruitment.

He has been at Oxford Brookes since 2008, taking up a Senior Lectureship in July 2011 and a Readership in October 2011. He was nominated Subject Coordinator for the new Master's course in Computer Vision, to be launched by the Department in September 2013. In September 2012 he became Head of the Artificial Intelligence and Vision research group, and in October 2012 he received a Next 10 award from the Faculty of Technology, Design and Environment as one of its top emerging researchers. He is currently supervising two Ph.D. students, an EPSRC-funded postdoc, and two visiting students from Turkey and Italy; two more Ph.D. students will join his group in 2014.

Dr Cuzzolin is a world expert in uncertainty theory and the theory of belief functions. He has worked extensively on the mathematical foundations of belief calculus. His main contribution is a geometric approach to uncertainty measures, in which uncertainty measures are represented as points of a Cartesian space and analyzed there (a minimal illustration follows this biography). His work in the field is being published in two monographs: "The Geometry of Uncertainty" (Springer-Verlag) and "Visions of a Generalized Probability Theory" (Lambert Academic Publishing).

He is also well known for his work in computer vision, mainly machine learning for human motion analysis, including tensorial models for identity recognition, metric learning for action recognition, and spectral techniques for articulated object segmentation and matching.

He is the author of some 90 peer-reviewed publications, published or under review, including two monographs, an edited Springer volume, three book chapters, 14 journal papers (plus eight under review), and nine chapters in collections. He won the Best Paper award at PRICAI'08, the Poster Prize at the ISIPTA'11 Symposium on Imprecise Probabilities, and Best Poster at the 2012 INRIA Summer School on Machine Learning and Visual Recognition, and was short-listed for prizes at ECSQARU'11 and BMVC'12, where he received the Outstanding Reviewer Award.

Dr Cuzzolin is Associate Editor of the IEEE Transactions on Fuzzy Systems and Guest Editor for the International Journal of Approximate Reasoning; he has previously been Associate Editor of the IEEE Transactions on Systems, Man, and Cybernetics, Part C and Guest Editor for Information Fusion, and he reviews for many other journals in both computer vision and imprecise probabilities, including Artificial Intelligence, the IEEE Transactions on Systems, Man, and Cybernetics, Part B, Computer Vision and Image Understanding, Information Sciences, the Journal of Risk and Reliability, the International Journal of Uncertainty, Fuzziness and Knowledge-Based Systems, Image and Vision Computing, and the Annals of Operations Research.

Dr Cuzzolin has served on the technical program committees of around 50 international conferences, including BMVC, IPMU and SMC, and is a Senior Program Committee member of Uncertainty in Artificial Intelligence (UAI). He was Program Chair and local organizer of the 3rd International Conference on the Theory of Belief Functions (BELIEF 2014), held at St Hugh's College, Oxford, UK.
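To make the geometric picture mentioned above concrete: a belief function on a finite frame of discernment Θ is induced by a mass function m : 2^Θ → [0, 1] with m(∅) = 0 and Σ_{A⊆Θ} m(A) = 1, via Bel(A) = Σ_{B⊆A} m(B). Collecting the values Bel(A) for every non-empty subset A of Θ yields a point in a Cartesian space of dimension 2^|Θ| − 1, and it is this representation that the geometric approach studies. The Python sketch below is a minimal illustration under these standard definitions; the frame, the mass values, and the function names are assumptions made for the example, not code from Cuzzolin's work.

```python
from itertools import chain, combinations

def powerset(frame):
    """All subsets of the frame, as frozensets (including the empty set)."""
    elems = sorted(frame)
    return [frozenset(s) for s in chain.from_iterable(
        combinations(elems, r) for r in range(len(elems) + 1))]

def belief_point(masses, frame):
    """Map a mass function m to the vector of belief values
    Bel(A) = sum of m(B) over all B contained in A, one coordinate per
    non-empty subset A: a point in a space of dimension 2**len(frame) - 1."""
    return {a: sum(m for b, m in masses.items() if b <= a)
            for a in powerset(frame) if a}

# Illustrative frame of discernment and mass function (toy numbers, assumed).
frame = {"x", "y", "z"}
masses = {frozenset({"x"}): 0.5,
          frozenset({"y", "z"}): 0.3,
          frozenset(frame): 0.2}

point = belief_point(masses, frame)
for subset, bel in sorted(point.items(), key=lambda kv: (len(kv[0]), sorted(kv[0]))):
    print(sorted(subset), round(bel, 2))
```

In this coordinatisation, probability measures occupy a simplex inside the same space, so operations such as approximating a belief function by a probability distribution can be read as geometric problems.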
","image":"https://0.academia-photos.com/366407/112374/61740579/s200_fabio.cuzzolin.jpg","thumbnailUrl":"https://0.academia-photos.com/366407/112374/61740579/s65_fabio.cuzzolin.jpg","primaryImageOfPage":{"@type":"ImageObject","url":"https://0.academia-photos.com/366407/112374/61740579/s200_fabio.cuzzolin.jpg","width":200},"sameAs":["http://cms.brookes.ac.uk/staff/FabioCuzzolin/","http://perception.inrialpes.fr/people/Cuzzolin/","http://cms.brookes.ac.uk/staff/FabioCuzzolin/bibliography-evidence.html","http://www.bfasociety.org/","http://cms.brookes.ac.uk/staff/FabioCuzzolin/refs-gait.html","http://www.sipta.org/","http://www.linkedin.com/profile/view?id=96024872\u0026locale=en_US\u0026trk=tab_pro","http://arnetminer.org/person/-1472154.html"],"relatedLink":"https://www.academia.edu/127272558/Epistemic_Artificial_Intelligence_Using_random_sets_to_quantify_uncertainty_in_machine_learning"}</script><link rel="stylesheet" media="all" href="//a.academia-assets.com/assets/design_system/heading-95367dc03b794f6737f30123738a886cf53b7a65cdef98a922a98591d60063e3.css" /><link rel="stylesheet" media="all" href="//a.academia-assets.com/assets/design_system/button-8c9ae4b5c8a2531640c354d92a1f3579c8ff103277ef74913e34c8a76d4e6c00.css" /><link rel="stylesheet" media="all" href="//a.academia-assets.com/assets/design_system/body-170d1319f0e354621e81ca17054bb147da2856ec0702fe440a99af314a6338c5.css" /><link rel="stylesheet" media="all" href="//a.academia-assets.com/assets/design_system/text_button-d1941ab08e91e29ee143084c4749da4aaffa350a2ac6eec2306b1d7a352d911a.css" /><style type="text/css">@media(max-width: 567px){:root{--token-mode: Parity;--dropshadow: 0 2px 4px 0 #22223340;--primary-brand: #0645b1;--error-dark: #b60000;--success-dark: #05b01c;--inactive-fill: #ebebee;--hover: #0c3b8d;--pressed: #082f75;--button-primary-fill-inactive: #ebebee;--button-primary-fill: #0645b1;--button-primary-text: #ffffff;--button-primary-fill-hover: #0c3b8d;--button-primary-fill-press: #082f75;--button-primary-icon: #ffffff;--button-primary-fill-inverse: #ffffff;--button-primary-text-inverse: #082f75;--button-primary-icon-inverse: #0645b1;--button-primary-fill-inverse-hover: #cddaef;--button-primary-stroke-inverse-pressed: #0645b1;--button-secondary-stroke-inactive: #b1b1ba;--button-secondary-fill: #eef2f9;--button-secondary-text: #082f75;--button-secondary-fill-press: #cddaef;--button-secondary-fill-inactive: #ebebee;--button-secondary-stroke: #cddaef;--button-secondary-stroke-hover: #386ac1;--button-secondary-stroke-press: #0645b1;--button-secondary-text-inactive: #b1b1ba;--button-secondary-icon: #082f75;--button-secondary-fill-hover: #e6ecf7;--button-secondary-stroke-inverse: #ffffff;--button-secondary-fill-inverse: rgba(255, 255, 255, 0);--button-secondary-icon-inverse: #ffffff;--button-secondary-icon-hover: #082f75;--button-secondary-icon-press: #082f75;--button-secondary-text-inverse: #ffffff;--button-secondary-text-hover: #082f75;--button-secondary-text-press: #082f75;--button-secondary-fill-inverse-hover: #043059;--button-xs-stroke: #141413;--button-xs-stroke-hover: #0c3b8d;--button-xs-stroke-press: #082f75;--button-xs-stroke-inactive: #ebebee;--button-xs-text: #141413;--button-xs-text-hover: #0c3b8d;--button-xs-text-press: #082f75;--button-xs-text-inactive: #91919e;--button-xs-icon: #141413;--button-xs-icon-hover: #0c3b8d;--button-xs-icon-press: #082f75;--button-xs-icon-inactive: #91919e;--button-xs-fill: #ffffff;--button-xs-fill-hover: #f4f7fc;--button-xs-fill-press: #eef2f9;--buttons-button-text-inactive: 
#91919e;--buttons-button-focus: #0645b1;--buttons-button-icon-inactive: #91919e;--buttons-small-buttons-corner-radius: 8px;--buttons-small-buttons-l-r-padding: 12px;--buttons-small-buttons-height: 44px;--buttons-small-buttons-gap: 8px;--buttons-small-buttons-icon-only-width: 44px;--buttons-small-buttons-icon-size: 20px;--buttons-small-buttons-stroke-default: 1px;--buttons-small-buttons-stroke-thick: 2px;--buttons-large-buttons-l-r-padding: 20px;--buttons-large-buttons-height: 54px;--buttons-large-buttons-icon-only-width: 54px;--buttons-large-buttons-icon-size: 20px;--buttons-large-buttons-gap: 8px;--buttons-large-buttons-corner-radius: 8px;--buttons-large-buttons-stroke-default: 1px;--buttons-large-buttons-stroke-thick: 2px;--buttons-extra-small-buttons-l-r-padding: 8px;--buttons-extra-small-buttons-height: 32px;--buttons-extra-small-buttons-icon-size: 16px;--buttons-extra-small-buttons-gap: 4px;--buttons-extra-small-buttons-corner-radius: 8px;--buttons-stroke-default: 1px;--buttons-stroke-thick: 2px;--background-beige: #f9f7f4;--error-light: #fff2f2;--text-placeholder: #6d6d7d;--stroke-dark: #141413;--stroke-light: #dddde2;--stroke-medium: #535366;--accent-green: #ccffd4;--accent-turquoise: #ccf7ff;--accent-yellow: #f7ffcc;--accent-peach: #ffd4cc;--accent-violet: #f7ccff;--accent-purple: #f4f7fc;--text-primary: #141413;--secondary-brand: #141413;--text-hover: #0c3b8d;--text-white: #ffffff;--text-link: #0645b1;--text-press: #082f75;--success-light: #f0f8f1;--background-light-blue: #eef2f9;--background-white: #ffffff;--premium-dark: #877440;--premium-light: #f9f6ed;--stroke-white: #ffffff;--inactive-content: #b1b1ba;--annotate-light: #a35dff;--annotate-dark: #824acc;--grid: #eef2f9;--inactive-stroke: #ebebee;--shadow: rgba(34, 34, 51, 0.25);--text-inactive: #6d6d7d;--text-error: #b60000;--stroke-error: #b60000;--background-error: #fff2f2;--background-black: #141413;--icon-default: #141413;--icon-blue: #0645b1;--background-grey: #dddde2;--icon-grey: #b1b1ba;--text-focus: #082f75;--brand-colors-neutral-black: #141413;--brand-colors-neutral-900: #535366;--brand-colors-neutral-800: #6d6d7d;--brand-colors-neutral-700: #91919e;--brand-colors-neutral-600: #b1b1ba;--brand-colors-neutral-500: #c8c8cf;--brand-colors-neutral-400: #dddde2;--brand-colors-neutral-300: #ebebee;--brand-colors-neutral-200: #f8f8fb;--brand-colors-neutral-100: #fafafa;--brand-colors-neutral-white: #ffffff;--brand-colors-blue-900: #043059;--brand-colors-blue-800: #082f75;--brand-colors-blue-700: #0c3b8d;--brand-colors-blue-600: #0645b1;--brand-colors-blue-500: #386ac1;--brand-colors-blue-400: #cddaef;--brand-colors-blue-300: #e6ecf7;--brand-colors-blue-200: #eef2f9;--brand-colors-blue-100: #f4f7fc;--brand-colors-gold-500: #877440;--brand-colors-gold-400: #e9e3d4;--brand-colors-gold-300: #f2efe8;--brand-colors-gold-200: #f9f6ed;--brand-colors-gold-100: #f9f7f4;--brand-colors-error-900: #920000;--brand-colors-error-500: #b60000;--brand-colors-success-900: #035c0f;--brand-colors-green: #ccffd4;--brand-colors-turquoise: #ccf7ff;--brand-colors-yellow: #f7ffcc;--brand-colors-peach: #ffd4cc;--brand-colors-violet: #f7ccff;--brand-colors-error-100: #fff2f2;--brand-colors-success-500: #05b01c;--brand-colors-success-100: #f0f8f1;--text-secondary: #535366;--icon-white: #ffffff;--background-beige-darker: #f2efe8;--icon-dark-grey: #535366;--type-font-family-sans-serif: Roboto;--type-font-family-serif: Georgia;--type-font-family-mono: IBM Plex Mono;--type-weights-300: 300;--type-weights-400: 400;--type-weights-500: 500;--type-weights-700: 
700;--type-sizes-12: 12px;--type-sizes-14: 14px;--type-sizes-16: 16px;--type-sizes-18: 18px;--type-sizes-20: 20px;--type-sizes-22: 22px;--type-sizes-24: 24px;--type-sizes-28: 28px;--type-sizes-30: 30px;--type-sizes-32: 32px;--type-sizes-40: 40px;--type-sizes-42: 42px;--type-sizes-48-2: 48px;--type-line-heights-16: 16px;--type-line-heights-20: 20px;--type-line-heights-23: 23px;--type-line-heights-24: 24px;--type-line-heights-25: 25px;--type-line-heights-26: 26px;--type-line-heights-29: 29px;--type-line-heights-30: 30px;--type-line-heights-32: 32px;--type-line-heights-34: 34px;--type-line-heights-35: 35px;--type-line-heights-36: 36px;--type-line-heights-38: 38px;--type-line-heights-40: 40px;--type-line-heights-46: 46px;--type-line-heights-48: 48px;--type-line-heights-52: 52px;--type-line-heights-58: 58px;--type-line-heights-68: 68px;--type-line-heights-74: 74px;--type-line-heights-82: 82px;--type-paragraph-spacings-0: 0px;--type-paragraph-spacings-4: 4px;--type-paragraph-spacings-8: 8px;--type-paragraph-spacings-16: 16px;--type-sans-serif-xl-font-weight: 400;--type-sans-serif-xl-size: 32px;--type-sans-serif-xl-line-height: 46px;--type-sans-serif-xl-paragraph-spacing: 16px;--type-sans-serif-lg-font-weight: 400;--type-sans-serif-lg-size: 30px;--type-sans-serif-lg-line-height: 36px;--type-sans-serif-lg-paragraph-spacing: 16px;--type-sans-serif-md-font-weight: 400;--type-sans-serif-md-line-height: 30px;--type-sans-serif-md-paragraph-spacing: 16px;--type-sans-serif-md-size: 24px;--type-sans-serif-xs-font-weight: 700;--type-sans-serif-xs-line-height: 24px;--type-sans-serif-xs-paragraph-spacing: 0px;--type-sans-serif-xs-size: 18px;--type-sans-serif-sm-font-weight: 400;--type-sans-serif-sm-line-height: 32px;--type-sans-serif-sm-paragraph-spacing: 16px;--type-sans-serif-sm-size: 20px;--type-body-xl-font-weight: 400;--type-body-xl-size: 24px;--type-body-xl-line-height: 36px;--type-body-xl-paragraph-spacing: 0px;--type-body-sm-font-weight: 400;--type-body-sm-size: 14px;--type-body-sm-line-height: 20px;--type-body-sm-paragraph-spacing: 8px;--type-body-xs-font-weight: 400;--type-body-xs-size: 12px;--type-body-xs-line-height: 16px;--type-body-xs-paragraph-spacing: 0px;--type-body-md-font-weight: 400;--type-body-md-size: 16px;--type-body-md-line-height: 20px;--type-body-md-paragraph-spacing: 4px;--type-body-lg-font-weight: 400;--type-body-lg-size: 20px;--type-body-lg-line-height: 26px;--type-body-lg-paragraph-spacing: 16px;--type-body-lg-medium-font-weight: 500;--type-body-lg-medium-size: 20px;--type-body-lg-medium-line-height: 32px;--type-body-lg-medium-paragraph-spacing: 16px;--type-body-md-medium-font-weight: 500;--type-body-md-medium-size: 16px;--type-body-md-medium-line-height: 20px;--type-body-md-medium-paragraph-spacing: 4px;--type-body-sm-bold-font-weight: 700;--type-body-sm-bold-size: 14px;--type-body-sm-bold-line-height: 20px;--type-body-sm-bold-paragraph-spacing: 8px;--type-body-sm-medium-font-weight: 500;--type-body-sm-medium-size: 14px;--type-body-sm-medium-line-height: 20px;--type-body-sm-medium-paragraph-spacing: 8px;--type-serif-md-font-weight: 400;--type-serif-md-size: 32px;--type-serif-md-paragraph-spacing: 0px;--type-serif-md-line-height: 40px;--type-serif-sm-font-weight: 400;--type-serif-sm-size: 24px;--type-serif-sm-paragraph-spacing: 0px;--type-serif-sm-line-height: 26px;--type-serif-lg-font-weight: 400;--type-serif-lg-size: 48px;--type-serif-lg-paragraph-spacing: 0px;--type-serif-lg-line-height: 52px;--type-serif-xs-font-weight: 400;--type-serif-xs-size: 
18px;--type-serif-xs-line-height: 24px;--type-serif-xs-paragraph-spacing: 0px;--type-serif-xl-font-weight: 400;--type-serif-xl-size: 48px;--type-serif-xl-paragraph-spacing: 0px;--type-serif-xl-line-height: 58px;--type-mono-md-font-weight: 400;--type-mono-md-size: 22px;--type-mono-md-line-height: 24px;--type-mono-md-paragraph-spacing: 0px;--type-mono-lg-font-weight: 400;--type-mono-lg-size: 40px;--type-mono-lg-line-height: 40px;--type-mono-lg-paragraph-spacing: 0px;--type-mono-sm-font-weight: 400;--type-mono-sm-size: 14px;--type-mono-sm-line-height: 24px;--type-mono-sm-paragraph-spacing: 0px;--spacing-xs-4: 4px;--spacing-xs-8: 8px;--spacing-xs-16: 16px;--spacing-sm-24: 24px;--spacing-sm-32: 32px;--spacing-md-40: 40px;--spacing-md-48: 48px;--spacing-lg-64: 64px;--spacing-lg-80: 80px;--spacing-xlg-104: 104px;--spacing-xlg-152: 152px;--spacing-xs-12: 12px;--spacing-page-section: 80px;--spacing-card-list-spacing: 48px;--spacing-text-section-spacing: 64px;--spacing-md-xs-headings: 40px;--corner-radius-radius-lg: 16px;--corner-radius-radius-sm: 4px;--corner-radius-radius-md: 8px;--corner-radius-radius-round: 104px}}@media(min-width: 568px)and (max-width: 1279px){:root{--token-mode: Parity;--dropshadow: 0 2px 4px 0 #22223340;--primary-brand: #0645b1;--error-dark: #b60000;--success-dark: #05b01c;--inactive-fill: #ebebee;--hover: #0c3b8d;--pressed: #082f75;--button-primary-fill-inactive: #ebebee;--button-primary-fill: #0645b1;--button-primary-text: #ffffff;--button-primary-fill-hover: #0c3b8d;--button-primary-fill-press: #082f75;--button-primary-icon: #ffffff;--button-primary-fill-inverse: #ffffff;--button-primary-text-inverse: #082f75;--button-primary-icon-inverse: #0645b1;--button-primary-fill-inverse-hover: #cddaef;--button-primary-stroke-inverse-pressed: #0645b1;--button-secondary-stroke-inactive: #b1b1ba;--button-secondary-fill: #eef2f9;--button-secondary-text: #082f75;--button-secondary-fill-press: #cddaef;--button-secondary-fill-inactive: #ebebee;--button-secondary-stroke: #cddaef;--button-secondary-stroke-hover: #386ac1;--button-secondary-stroke-press: #0645b1;--button-secondary-text-inactive: #b1b1ba;--button-secondary-icon: #082f75;--button-secondary-fill-hover: #e6ecf7;--button-secondary-stroke-inverse: #ffffff;--button-secondary-fill-inverse: rgba(255, 255, 255, 0);--button-secondary-icon-inverse: #ffffff;--button-secondary-icon-hover: #082f75;--button-secondary-icon-press: #082f75;--button-secondary-text-inverse: #ffffff;--button-secondary-text-hover: #082f75;--button-secondary-text-press: #082f75;--button-secondary-fill-inverse-hover: #043059;--button-xs-stroke: #141413;--button-xs-stroke-hover: #0c3b8d;--button-xs-stroke-press: #082f75;--button-xs-stroke-inactive: #ebebee;--button-xs-text: #141413;--button-xs-text-hover: #0c3b8d;--button-xs-text-press: #082f75;--button-xs-text-inactive: #91919e;--button-xs-icon: #141413;--button-xs-icon-hover: #0c3b8d;--button-xs-icon-press: #082f75;--button-xs-icon-inactive: #91919e;--button-xs-fill: #ffffff;--button-xs-fill-hover: #f4f7fc;--button-xs-fill-press: #eef2f9;--buttons-button-text-inactive: #91919e;--buttons-button-focus: #0645b1;--buttons-button-icon-inactive: #91919e;--buttons-small-buttons-corner-radius: 8px;--buttons-small-buttons-l-r-padding: 12px;--buttons-small-buttons-height: 44px;--buttons-small-buttons-gap: 8px;--buttons-small-buttons-icon-only-width: 44px;--buttons-small-buttons-icon-size: 20px;--buttons-small-buttons-stroke-default: 1px;--buttons-small-buttons-stroke-thick: 2px;--buttons-large-buttons-l-r-padding: 
20px;--buttons-large-buttons-height: 54px;--buttons-large-buttons-icon-only-width: 54px;--buttons-large-buttons-icon-size: 20px;--buttons-large-buttons-gap: 8px;--buttons-large-buttons-corner-radius: 8px;--buttons-large-buttons-stroke-default: 1px;--buttons-large-buttons-stroke-thick: 2px;--buttons-extra-small-buttons-l-r-padding: 8px;--buttons-extra-small-buttons-height: 32px;--buttons-extra-small-buttons-icon-size: 16px;--buttons-extra-small-buttons-gap: 4px;--buttons-extra-small-buttons-corner-radius: 8px;--buttons-stroke-default: 1px;--buttons-stroke-thick: 2px;--background-beige: #f9f7f4;--error-light: #fff2f2;--text-placeholder: #6d6d7d;--stroke-dark: #141413;--stroke-light: #dddde2;--stroke-medium: #535366;--accent-green: #ccffd4;--accent-turquoise: #ccf7ff;--accent-yellow: #f7ffcc;--accent-peach: #ffd4cc;--accent-violet: #f7ccff;--accent-purple: #f4f7fc;--text-primary: #141413;--secondary-brand: #141413;--text-hover: #0c3b8d;--text-white: #ffffff;--text-link: #0645b1;--text-press: #082f75;--success-light: #f0f8f1;--background-light-blue: #eef2f9;--background-white: #ffffff;--premium-dark: #877440;--premium-light: #f9f6ed;--stroke-white: #ffffff;--inactive-content: #b1b1ba;--annotate-light: #a35dff;--annotate-dark: #824acc;--grid: #eef2f9;--inactive-stroke: #ebebee;--shadow: rgba(34, 34, 51, 0.25);--text-inactive: #6d6d7d;--text-error: #b60000;--stroke-error: #b60000;--background-error: #fff2f2;--background-black: #141413;--icon-default: #141413;--icon-blue: #0645b1;--background-grey: #dddde2;--icon-grey: #b1b1ba;--text-focus: #082f75;--brand-colors-neutral-black: #141413;--brand-colors-neutral-900: #535366;--brand-colors-neutral-800: #6d6d7d;--brand-colors-neutral-700: #91919e;--brand-colors-neutral-600: #b1b1ba;--brand-colors-neutral-500: #c8c8cf;--brand-colors-neutral-400: #dddde2;--brand-colors-neutral-300: #ebebee;--brand-colors-neutral-200: #f8f8fb;--brand-colors-neutral-100: #fafafa;--brand-colors-neutral-white: #ffffff;--brand-colors-blue-900: #043059;--brand-colors-blue-800: #082f75;--brand-colors-blue-700: #0c3b8d;--brand-colors-blue-600: #0645b1;--brand-colors-blue-500: #386ac1;--brand-colors-blue-400: #cddaef;--brand-colors-blue-300: #e6ecf7;--brand-colors-blue-200: #eef2f9;--brand-colors-blue-100: #f4f7fc;--brand-colors-gold-500: #877440;--brand-colors-gold-400: #e9e3d4;--brand-colors-gold-300: #f2efe8;--brand-colors-gold-200: #f9f6ed;--brand-colors-gold-100: #f9f7f4;--brand-colors-error-900: #920000;--brand-colors-error-500: #b60000;--brand-colors-success-900: #035c0f;--brand-colors-green: #ccffd4;--brand-colors-turquoise: #ccf7ff;--brand-colors-yellow: #f7ffcc;--brand-colors-peach: #ffd4cc;--brand-colors-violet: #f7ccff;--brand-colors-error-100: #fff2f2;--brand-colors-success-500: #05b01c;--brand-colors-success-100: #f0f8f1;--text-secondary: #535366;--icon-white: #ffffff;--background-beige-darker: #f2efe8;--icon-dark-grey: #535366;--type-font-family-sans-serif: Roboto;--type-font-family-serif: Georgia;--type-font-family-mono: IBM Plex Mono;--type-weights-300: 300;--type-weights-400: 400;--type-weights-500: 500;--type-weights-700: 700;--type-sizes-12: 12px;--type-sizes-14: 14px;--type-sizes-16: 16px;--type-sizes-18: 18px;--type-sizes-20: 20px;--type-sizes-22: 22px;--type-sizes-24: 24px;--type-sizes-28: 28px;--type-sizes-30: 30px;--type-sizes-32: 32px;--type-sizes-40: 40px;--type-sizes-42: 42px;--type-sizes-48-2: 48px;--type-line-heights-16: 16px;--type-line-heights-20: 20px;--type-line-heights-23: 23px;--type-line-heights-24: 24px;--type-line-heights-25: 
25px;--type-line-heights-26: 26px;--type-line-heights-29: 29px;--type-line-heights-30: 30px;--type-line-heights-32: 32px;--type-line-heights-34: 34px;--type-line-heights-35: 35px;--type-line-heights-36: 36px;--type-line-heights-38: 38px;--type-line-heights-40: 40px;--type-line-heights-46: 46px;--type-line-heights-48: 48px;--type-line-heights-52: 52px;--type-line-heights-58: 58px;--type-line-heights-68: 68px;--type-line-heights-74: 74px;--type-line-heights-82: 82px;--type-paragraph-spacings-0: 0px;--type-paragraph-spacings-4: 4px;--type-paragraph-spacings-8: 8px;--type-paragraph-spacings-16: 16px;--type-sans-serif-xl-font-weight: 400;--type-sans-serif-xl-size: 42px;--type-sans-serif-xl-line-height: 46px;--type-sans-serif-xl-paragraph-spacing: 16px;--type-sans-serif-lg-font-weight: 400;--type-sans-serif-lg-size: 32px;--type-sans-serif-lg-line-height: 36px;--type-sans-serif-lg-paragraph-spacing: 16px;--type-sans-serif-md-font-weight: 400;--type-sans-serif-md-line-height: 34px;--type-sans-serif-md-paragraph-spacing: 16px;--type-sans-serif-md-size: 28px;--type-sans-serif-xs-font-weight: 700;--type-sans-serif-xs-line-height: 25px;--type-sans-serif-xs-paragraph-spacing: 0px;--type-sans-serif-xs-size: 20px;--type-sans-serif-sm-font-weight: 400;--type-sans-serif-sm-line-height: 30px;--type-sans-serif-sm-paragraph-spacing: 16px;--type-sans-serif-sm-size: 24px;--type-body-xl-font-weight: 400;--type-body-xl-size: 24px;--type-body-xl-line-height: 36px;--type-body-xl-paragraph-spacing: 0px;--type-body-sm-font-weight: 400;--type-body-sm-size: 14px;--type-body-sm-line-height: 20px;--type-body-sm-paragraph-spacing: 8px;--type-body-xs-font-weight: 400;--type-body-xs-size: 12px;--type-body-xs-line-height: 16px;--type-body-xs-paragraph-spacing: 0px;--type-body-md-font-weight: 400;--type-body-md-size: 16px;--type-body-md-line-height: 20px;--type-body-md-paragraph-spacing: 4px;--type-body-lg-font-weight: 400;--type-body-lg-size: 20px;--type-body-lg-line-height: 26px;--type-body-lg-paragraph-spacing: 16px;--type-body-lg-medium-font-weight: 500;--type-body-lg-medium-size: 20px;--type-body-lg-medium-line-height: 32px;--type-body-lg-medium-paragraph-spacing: 16px;--type-body-md-medium-font-weight: 500;--type-body-md-medium-size: 16px;--type-body-md-medium-line-height: 20px;--type-body-md-medium-paragraph-spacing: 4px;--type-body-sm-bold-font-weight: 700;--type-body-sm-bold-size: 14px;--type-body-sm-bold-line-height: 20px;--type-body-sm-bold-paragraph-spacing: 8px;--type-body-sm-medium-font-weight: 500;--type-body-sm-medium-size: 14px;--type-body-sm-medium-line-height: 20px;--type-body-sm-medium-paragraph-spacing: 8px;--type-serif-md-font-weight: 400;--type-serif-md-size: 40px;--type-serif-md-paragraph-spacing: 0px;--type-serif-md-line-height: 48px;--type-serif-sm-font-weight: 400;--type-serif-sm-size: 28px;--type-serif-sm-paragraph-spacing: 0px;--type-serif-sm-line-height: 32px;--type-serif-lg-font-weight: 400;--type-serif-lg-size: 58px;--type-serif-lg-paragraph-spacing: 0px;--type-serif-lg-line-height: 68px;--type-serif-xs-font-weight: 400;--type-serif-xs-size: 18px;--type-serif-xs-line-height: 24px;--type-serif-xs-paragraph-spacing: 0px;--type-serif-xl-font-weight: 400;--type-serif-xl-size: 74px;--type-serif-xl-paragraph-spacing: 0px;--type-serif-xl-line-height: 82px;--type-mono-md-font-weight: 400;--type-mono-md-size: 22px;--type-mono-md-line-height: 24px;--type-mono-md-paragraph-spacing: 0px;--type-mono-lg-font-weight: 400;--type-mono-lg-size: 40px;--type-mono-lg-line-height: 
40px;--type-mono-lg-paragraph-spacing: 0px;--type-mono-sm-font-weight: 400;--type-mono-sm-size: 14px;--type-mono-sm-line-height: 24px;--type-mono-sm-paragraph-spacing: 0px;--spacing-xs-4: 4px;--spacing-xs-8: 8px;--spacing-xs-16: 16px;--spacing-sm-24: 24px;--spacing-sm-32: 32px;--spacing-md-40: 40px;--spacing-md-48: 48px;--spacing-lg-64: 64px;--spacing-lg-80: 80px;--spacing-xlg-104: 104px;--spacing-xlg-152: 152px;--spacing-xs-12: 12px;--spacing-page-section: 104px;--spacing-card-list-spacing: 48px;--spacing-text-section-spacing: 80px;--spacing-md-xs-headings: 40px;--corner-radius-radius-lg: 16px;--corner-radius-radius-sm: 4px;--corner-radius-radius-md: 8px;--corner-radius-radius-round: 104px}}@media(min-width: 1280px){:root{--token-mode: Parity;--dropshadow: 0 2px 4px 0 #22223340;--primary-brand: #0645b1;--error-dark: #b60000;--success-dark: #05b01c;--inactive-fill: #ebebee;--hover: #0c3b8d;--pressed: #082f75;--button-primary-fill-inactive: #ebebee;--button-primary-fill: #0645b1;--button-primary-text: #ffffff;--button-primary-fill-hover: #0c3b8d;--button-primary-fill-press: #082f75;--button-primary-icon: #ffffff;--button-primary-fill-inverse: #ffffff;--button-primary-text-inverse: #082f75;--button-primary-icon-inverse: #0645b1;--button-primary-fill-inverse-hover: #cddaef;--button-primary-stroke-inverse-pressed: #0645b1;--button-secondary-stroke-inactive: #b1b1ba;--button-secondary-fill: #eef2f9;--button-secondary-text: #082f75;--button-secondary-fill-press: #cddaef;--button-secondary-fill-inactive: #ebebee;--button-secondary-stroke: #cddaef;--button-secondary-stroke-hover: #386ac1;--button-secondary-stroke-press: #0645b1;--button-secondary-text-inactive: #b1b1ba;--button-secondary-icon: #082f75;--button-secondary-fill-hover: #e6ecf7;--button-secondary-stroke-inverse: #ffffff;--button-secondary-fill-inverse: rgba(255, 255, 255, 0);--button-secondary-icon-inverse: #ffffff;--button-secondary-icon-hover: #082f75;--button-secondary-icon-press: #082f75;--button-secondary-text-inverse: #ffffff;--button-secondary-text-hover: #082f75;--button-secondary-text-press: #082f75;--button-secondary-fill-inverse-hover: #043059;--button-xs-stroke: #141413;--button-xs-stroke-hover: #0c3b8d;--button-xs-stroke-press: #082f75;--button-xs-stroke-inactive: #ebebee;--button-xs-text: #141413;--button-xs-text-hover: #0c3b8d;--button-xs-text-press: #082f75;--button-xs-text-inactive: #91919e;--button-xs-icon: #141413;--button-xs-icon-hover: #0c3b8d;--button-xs-icon-press: #082f75;--button-xs-icon-inactive: #91919e;--button-xs-fill: #ffffff;--button-xs-fill-hover: #f4f7fc;--button-xs-fill-press: #eef2f9;--buttons-button-text-inactive: #91919e;--buttons-button-focus: #0645b1;--buttons-button-icon-inactive: #91919e;--buttons-small-buttons-corner-radius: 8px;--buttons-small-buttons-l-r-padding: 12px;--buttons-small-buttons-height: 44px;--buttons-small-buttons-gap: 8px;--buttons-small-buttons-icon-only-width: 44px;--buttons-small-buttons-icon-size: 20px;--buttons-small-buttons-stroke-default: 1px;--buttons-small-buttons-stroke-thick: 2px;--buttons-large-buttons-l-r-padding: 20px;--buttons-large-buttons-height: 54px;--buttons-large-buttons-icon-only-width: 54px;--buttons-large-buttons-icon-size: 20px;--buttons-large-buttons-gap: 8px;--buttons-large-buttons-corner-radius: 8px;--buttons-large-buttons-stroke-default: 1px;--buttons-large-buttons-stroke-thick: 2px;--buttons-extra-small-buttons-l-r-padding: 8px;--buttons-extra-small-buttons-height: 32px;--buttons-extra-small-buttons-icon-size: 
16px;--buttons-extra-small-buttons-gap: 4px;--buttons-extra-small-buttons-corner-radius: 8px;--buttons-stroke-default: 1px;--buttons-stroke-thick: 2px;--background-beige: #f9f7f4;--error-light: #fff2f2;--text-placeholder: #6d6d7d;--stroke-dark: #141413;--stroke-light: #dddde2;--stroke-medium: #535366;--accent-green: #ccffd4;--accent-turquoise: #ccf7ff;--accent-yellow: #f7ffcc;--accent-peach: #ffd4cc;--accent-violet: #f7ccff;--accent-purple: #f4f7fc;--text-primary: #141413;--secondary-brand: #141413;--text-hover: #0c3b8d;--text-white: #ffffff;--text-link: #0645b1;--text-press: #082f75;--success-light: #f0f8f1;--background-light-blue: #eef2f9;--background-white: #ffffff;--premium-dark: #877440;--premium-light: #f9f6ed;--stroke-white: #ffffff;--inactive-content: #b1b1ba;--annotate-light: #a35dff;--annotate-dark: #824acc;--grid: #eef2f9;--inactive-stroke: #ebebee;--shadow: rgba(34, 34, 51, 0.25);--text-inactive: #6d6d7d;--text-error: #b60000;--stroke-error: #b60000;--background-error: #fff2f2;--background-black: #141413;--icon-default: #141413;--icon-blue: #0645b1;--background-grey: #dddde2;--icon-grey: #b1b1ba;--text-focus: #082f75;--brand-colors-neutral-black: #141413;--brand-colors-neutral-900: #535366;--brand-colors-neutral-800: #6d6d7d;--brand-colors-neutral-700: #91919e;--brand-colors-neutral-600: #b1b1ba;--brand-colors-neutral-500: #c8c8cf;--brand-colors-neutral-400: #dddde2;--brand-colors-neutral-300: #ebebee;--brand-colors-neutral-200: #f8f8fb;--brand-colors-neutral-100: #fafafa;--brand-colors-neutral-white: #ffffff;--brand-colors-blue-900: #043059;--brand-colors-blue-800: #082f75;--brand-colors-blue-700: #0c3b8d;--brand-colors-blue-600: #0645b1;--brand-colors-blue-500: #386ac1;--brand-colors-blue-400: #cddaef;--brand-colors-blue-300: #e6ecf7;--brand-colors-blue-200: #eef2f9;--brand-colors-blue-100: #f4f7fc;--brand-colors-gold-500: #877440;--brand-colors-gold-400: #e9e3d4;--brand-colors-gold-300: #f2efe8;--brand-colors-gold-200: #f9f6ed;--brand-colors-gold-100: #f9f7f4;--brand-colors-error-900: #920000;--brand-colors-error-500: #b60000;--brand-colors-success-900: #035c0f;--brand-colors-green: #ccffd4;--brand-colors-turquoise: #ccf7ff;--brand-colors-yellow: #f7ffcc;--brand-colors-peach: #ffd4cc;--brand-colors-violet: #f7ccff;--brand-colors-error-100: #fff2f2;--brand-colors-success-500: #05b01c;--brand-colors-success-100: #f0f8f1;--text-secondary: #535366;--icon-white: #ffffff;--background-beige-darker: #f2efe8;--icon-dark-grey: #535366;--type-font-family-sans-serif: Roboto;--type-font-family-serif: Georgia;--type-font-family-mono: IBM Plex Mono;--type-weights-300: 300;--type-weights-400: 400;--type-weights-500: 500;--type-weights-700: 700;--type-sizes-12: 12px;--type-sizes-14: 14px;--type-sizes-16: 16px;--type-sizes-18: 18px;--type-sizes-20: 20px;--type-sizes-22: 22px;--type-sizes-24: 24px;--type-sizes-28: 28px;--type-sizes-30: 30px;--type-sizes-32: 32px;--type-sizes-40: 40px;--type-sizes-42: 42px;--type-sizes-48-2: 48px;--type-line-heights-16: 16px;--type-line-heights-20: 20px;--type-line-heights-23: 23px;--type-line-heights-24: 24px;--type-line-heights-25: 25px;--type-line-heights-26: 26px;--type-line-heights-29: 29px;--type-line-heights-30: 30px;--type-line-heights-32: 32px;--type-line-heights-34: 34px;--type-line-heights-35: 35px;--type-line-heights-36: 36px;--type-line-heights-38: 38px;--type-line-heights-40: 40px;--type-line-heights-46: 46px;--type-line-heights-48: 48px;--type-line-heights-52: 52px;--type-line-heights-58: 58px;--type-line-heights-68: 68px;--type-line-heights-74: 
74px;--type-line-heights-82: 82px;--type-paragraph-spacings-0: 0px;--type-paragraph-spacings-4: 4px;--type-paragraph-spacings-8: 8px;--type-paragraph-spacings-16: 16px;--type-sans-serif-xl-font-weight: 400;--type-sans-serif-xl-size: 42px;--type-sans-serif-xl-line-height: 46px;--type-sans-serif-xl-paragraph-spacing: 16px;--type-sans-serif-lg-font-weight: 400;--type-sans-serif-lg-size: 32px;--type-sans-serif-lg-line-height: 38px;--type-sans-serif-lg-paragraph-spacing: 16px;--type-sans-serif-md-font-weight: 400;--type-sans-serif-md-line-height: 34px;--type-sans-serif-md-paragraph-spacing: 16px;--type-sans-serif-md-size: 28px;--type-sans-serif-xs-font-weight: 700;--type-sans-serif-xs-line-height: 25px;--type-sans-serif-xs-paragraph-spacing: 0px;--type-sans-serif-xs-size: 20px;--type-sans-serif-sm-font-weight: 400;--type-sans-serif-sm-line-height: 30px;--type-sans-serif-sm-paragraph-spacing: 16px;--type-sans-serif-sm-size: 24px;--type-body-xl-font-weight: 400;--type-body-xl-size: 24px;--type-body-xl-line-height: 36px;--type-body-xl-paragraph-spacing: 0px;--type-body-sm-font-weight: 400;--type-body-sm-size: 14px;--type-body-sm-line-height: 20px;--type-body-sm-paragraph-spacing: 8px;--type-body-xs-font-weight: 400;--type-body-xs-size: 12px;--type-body-xs-line-height: 16px;--type-body-xs-paragraph-spacing: 0px;--type-body-md-font-weight: 400;--type-body-md-size: 16px;--type-body-md-line-height: 20px;--type-body-md-paragraph-spacing: 4px;--type-body-lg-font-weight: 400;--type-body-lg-size: 20px;--type-body-lg-line-height: 26px;--type-body-lg-paragraph-spacing: 16px;--type-body-lg-medium-font-weight: 500;--type-body-lg-medium-size: 20px;--type-body-lg-medium-line-height: 32px;--type-body-lg-medium-paragraph-spacing: 16px;--type-body-md-medium-font-weight: 500;--type-body-md-medium-size: 16px;--type-body-md-medium-line-height: 20px;--type-body-md-medium-paragraph-spacing: 4px;--type-body-sm-bold-font-weight: 700;--type-body-sm-bold-size: 14px;--type-body-sm-bold-line-height: 20px;--type-body-sm-bold-paragraph-spacing: 8px;--type-body-sm-medium-font-weight: 500;--type-body-sm-medium-size: 14px;--type-body-sm-medium-line-height: 20px;--type-body-sm-medium-paragraph-spacing: 8px;--type-serif-md-font-weight: 400;--type-serif-md-size: 40px;--type-serif-md-paragraph-spacing: 0px;--type-serif-md-line-height: 48px;--type-serif-sm-font-weight: 400;--type-serif-sm-size: 28px;--type-serif-sm-paragraph-spacing: 0px;--type-serif-sm-line-height: 32px;--type-serif-lg-font-weight: 400;--type-serif-lg-size: 58px;--type-serif-lg-paragraph-spacing: 0px;--type-serif-lg-line-height: 68px;--type-serif-xs-font-weight: 400;--type-serif-xs-size: 18px;--type-serif-xs-line-height: 24px;--type-serif-xs-paragraph-spacing: 0px;--type-serif-xl-font-weight: 400;--type-serif-xl-size: 74px;--type-serif-xl-paragraph-spacing: 0px;--type-serif-xl-line-height: 82px;--type-mono-md-font-weight: 400;--type-mono-md-size: 22px;--type-mono-md-line-height: 24px;--type-mono-md-paragraph-spacing: 0px;--type-mono-lg-font-weight: 400;--type-mono-lg-size: 40px;--type-mono-lg-line-height: 40px;--type-mono-lg-paragraph-spacing: 0px;--type-mono-sm-font-weight: 400;--type-mono-sm-size: 14px;--type-mono-sm-line-height: 24px;--type-mono-sm-paragraph-spacing: 0px;--spacing-xs-4: 4px;--spacing-xs-8: 8px;--spacing-xs-16: 16px;--spacing-sm-24: 24px;--spacing-sm-32: 32px;--spacing-md-40: 40px;--spacing-md-48: 48px;--spacing-lg-64: 64px;--spacing-lg-80: 80px;--spacing-xlg-104: 104px;--spacing-xlg-152: 152px;--spacing-xs-12: 12px;--spacing-page-section: 
152px;--spacing-card-list-spacing: 48px;--spacing-text-section-spacing: 80px;--spacing-md-xs-headings: 40px;--corner-radius-radius-lg: 16px;--corner-radius-radius-sm: 4px;--corner-radius-radius-md: 8px;--corner-radius-radius-round: 104px}}</style><link crossorigin="" href="https://fonts.gstatic.com/" rel="preconnect" /><link href="https://fonts.googleapis.com/css2?family=DM+Sans:ital,opsz,wght@0,9..40,100..1000;1,9..40,100..1000&family=Gupter:wght@400;500;700&family=IBM+Plex+Mono:wght@300;400&family=Material+Symbols+Outlined:opsz,wght,FILL,GRAD@20,400,0,0&display=swap" rel="stylesheet" /><link rel="stylesheet" media="all" href="//a.academia-assets.com/assets/design_system/common-57f9da13cef3fd4e2a8b655342c6488eded3e557e823fe67571f2ac77acd7b6f.css" /> <meta name="author" content="fabio cuzzolin" /> <meta name="description" content="Fabio Cuzzolin was born in Jesolo, Italy. He graduated magna cum laude with the University of Padua, was awarded a Ph.D. there in 2001 for a thesis entitled…" /> <meta name="google-site-verification" content="bKJMBZA7E43xhDOopFZkssMMkBRjvYERV-NaN4R6mrs" /> <script> var $controller_name = 'works'; var $action_name = "summary"; var $rails_env = 'production'; var $app_rev = '9744e839ffe2d813ef8b7eb988ae0a3341a6052d'; var $domain = 'academia.edu'; var $app_host = "academia.edu"; var $asset_host = "academia-assets.com"; var $start_time = new Date().getTime(); var $recaptcha_key = "6LdxlRMTAAAAADnu_zyLhLg0YF9uACwz78shpjJB"; var $recaptcha_invisible_key = "6Lf3KHUUAAAAACggoMpmGJdQDtiyrjVlvGJ6BbAj"; var $disableClientRecordHit = false; </script> <script> window.Aedu = { hit_data: null }; window.Aedu.SiteStats = {"premium_universities_count":13919,"monthly_visitors":"139 million","monthly_visitor_count":139296536,"monthly_visitor_count_in_millions":139,"user_count":286073059,"paper_count":55203019,"paper_count_in_millions":55,"page_count":432000000,"page_count_in_millions":432,"pdf_count":16500000,"pdf_count_in_millions":16}; window.Aedu.serverRenderTime = new Date(1743420771000); window.Aedu.timeDifference = new Date().getTime() - 1743420771000; window.Aedu.isUsingCssV1 = false; window.Aedu.enableLocalization = true; window.Aedu.activateFullstory = false; window.Aedu.serviceAvailability = { status: {"attention_db":"on","bibliography_db":"on","contacts_db":"on","email_db":"on","indexability_db":"on","mentions_db":"on","news_db":"on","notifications_db":"on","offsite_mentions_db":"on","redshift":"on","redshift_exports_db":"on","related_works_db":"on","ring_db":"on","user_tests_db":"on"}, serviceEnabled: function(service) { return this.status[service] === "on"; }, readEnabled: function(service) { return this.serviceEnabled(service) || this.status[service] === "read_only"; }, }; window.Aedu.viewApmTrace = function() { // Check if x-apm-trace-id meta tag is set, and open the trace in APM // in a new window if it is. 
var apmTraceId = document.head.querySelector('meta[name="x-apm-trace-id"]'); if (apmTraceId) { var traceId = apmTraceId.content; // Use trace ID to construct URL, an example URL looks like: // https://app.datadoghq.com/apm/traces?query=trace_id%31298410148923562634 var apmUrl = 'https://app.datadoghq.com/apm/traces?query=trace_id%3A' + traceId; window.open(apmUrl, '_blank'); } }; </script> <!--[if lt IE 9]> <script src="//cdnjs.cloudflare.com/ajax/libs/html5shiv/3.7.2/html5shiv.min.js"></script> <![endif]--> <link href="https://fonts.googleapis.com/css?family=Roboto:100,100i,300,300i,400,400i,500,500i,700,700i,900,900i" rel="stylesheet"> <link rel="preload" href="//maxcdn.bootstrapcdn.com/font-awesome/4.3.0/css/font-awesome.min.css" as="style" onload="this.rel='stylesheet'"> <link rel="stylesheet" media="all" href="//a.academia-assets.com/assets/libraries-a9675dcb01ec4ef6aa807ba772c7a5a00c1820d3ff661c1038a20f80d06bb4e4.css" /> <link rel="stylesheet" media="all" href="//a.academia-assets.com/assets/academia-9982828ed1de4777566441c35ccf7157c55ca779141fce69380d727ebdbbb926.css" /> <link rel="stylesheet" media="all" href="//a.academia-assets.com/assets/design_system_legacy-056a9113b9a0f5343d013b29ee1929d5a18be35fdcdceb616600b4db8bd20054.css" /> <script src="//a.academia-assets.com/assets/webpack_bundles/runtime-bundle-005434038af4252ca37c527588411a3d6a0eabb5f727fac83f8bbe7fd88d93bb.js"></script> <script src="//a.academia-assets.com/assets/webpack_bundles/webpack_libraries_and_infrequently_changed.wjs-bundle-cf157bca4ef673abcac8051ac68ed1136134beba22a884388e7ed6391572eef4.js"></script> <script src="//a.academia-assets.com/assets/webpack_bundles/core_webpack.wjs-bundle-f96ab8a6334d161855249975a57d3f3d57f65c2e7553c6d20ab43c63efb79575.js"></script> <script src="//a.academia-assets.com/assets/webpack_bundles/sentry.wjs-bundle-5fe03fddca915c8ba0f7edbe64c194308e8ce5abaed7bffe1255ff37549c4808.js"></script> <script> jade = window.jade || {}; jade.helpers = window.$h; jade._ = window._; </script> <!-- Google Tag Manager --> <script id="tag-manager-head-root">(function(w,d,s,l,i){w[l]=w[l]||[];w[l].push({'gtm.start': new Date().getTime(),event:'gtm.js'});var f=d.getElementsByTagName(s)[0], j=d.createElement(s),dl=l!='dataLayer'?'&l='+l:'';j.async=true;j.src= 'https://www.googletagmanager.com/gtm.js?id='+i+dl;f.parentNode.insertBefore(j,f); })(window,document,'script','dataLayer_old','GTM-5G9JF7Z');</script> <!-- End Google Tag Manager --> <script> window.gptadslots = []; window.googletag = window.googletag || {}; window.googletag.cmd = window.googletag.cmd || []; </script> <script type="text/javascript"> // TODO(jacob): This should be defined, may be rare load order problem. // Checking if null is just a quick fix, will default to en if unset. // Better fix is to run this immedietely after I18n is set. if (window.I18n != null) { I18n.defaultLocale = "en"; I18n.locale = "en"; I18n.fallbacks = true; } </script> <link rel="canonical" href="https://oxfordbrookes.academia.edu/FabioCuzzolin" /> </head> <!--[if gte IE 9 ]> <body class='ie ie9 c-profiles/works a-summary logged_out'> <![endif]--> <!--[if !(IE) ]><!--> <body class='c-profiles/works a-summary logged_out'> <!--<![endif]--> <div id="fb-root"></div><script>window.fbAsyncInit = function() { FB.init({ appId: "2369844204", version: "v8.0", status: true, cookie: true, xfbml: true }); // Additional initialization code. if (window.InitFacebook) { // facebook.ts already loaded, set it up. 
window.InitFacebook(); } else { // Set a flag for facebook.ts to find when it loads. window.academiaAuthReadyFacebook = true; } };</script><script>window.fbAsyncLoad = function() { // Protection against double calling of this function if (window.FB) { return; } (function(d, s, id){ var js, fjs = d.getElementsByTagName(s)[0]; if (d.getElementById(id)) {return;} js = d.createElement(s); js.id = id; js.src = "//connect.facebook.net/en_US/sdk.js"; fjs.parentNode.insertBefore(js, fjs); }(document, 'script', 'facebook-jssdk')); } if (!window.defer_facebook) { // Autoload if not deferred window.fbAsyncLoad(); } else { // Defer loading by 5 seconds setTimeout(function() { window.fbAsyncLoad(); }, 5000); }</script> <div id="google-root"></div><script>window.loadGoogle = function() { if (window.InitGoogle) { // google.ts already loaded, set it up. window.InitGoogle("331998490334-rsn3chp12mbkiqhl6e7lu2q0mlbu0f1b"); } else { // Set a flag for google.ts to use when it loads. window.GoogleClientID = "331998490334-rsn3chp12mbkiqhl6e7lu2q0mlbu0f1b"; } };</script><script>window.googleAsyncLoad = function() { // Protection against double calling of this function (function(d) { var js; var id = 'google-jssdk'; var ref = d.getElementsByTagName('script')[0]; if (d.getElementById(id)) { return; } js = d.createElement('script'); js.id = id; js.async = true; js.onload = loadGoogle; js.src = "https://accounts.google.com/gsi/client" ref.parentNode.insertBefore(js, ref); }(document)); } if (!window.defer_google) { // Autoload if not deferred window.googleAsyncLoad(); } else { // Defer loading by 5 seconds setTimeout(function() { window.googleAsyncLoad(); }, 5000); }</script> <div id="tag-manager-body-root"> <!-- Google Tag Manager (noscript) --> <noscript><iframe src="https://www.googletagmanager.com/ns.html?id=GTM-5G9JF7Z" height="0" width="0" style="display:none;visibility:hidden"></iframe></noscript> <!-- End Google Tag Manager (noscript) --> <!-- Event listeners for analytics --> <script> window.addEventListener('load', function() { if (document.querySelector('input[name="commit"]')) { document.querySelector('input[name="commit"]').addEventListener('click', function() { gtag('event', 'click', { event_category: 'button', event_label: 'Log In' }) }) } }); </script> </div> <script>var _comscore = _comscore || []; _comscore.push({ c1: "2", c2: "26766707" }); (function() { var s = document.createElement("script"), el = document.getElementsByTagName("script")[0]; s.async = true; s.src = (document.location.protocol == "https:" ? 
"https://sb" : "http://b") + ".scorecardresearch.com/beacon.js"; el.parentNode.insertBefore(s, el); })();</script><img src="https://sb.scorecardresearch.com/p?c1=2&c2=26766707&cv=2.0&cj=1" style="position: absolute; visibility: hidden" /> <div id='react-modal'></div> <div class='DesignSystem'> <a class='u-showOnFocus' href='#site'> Skip to main content </a> </div> <div id="upgrade_ie_banner" style="display: none;"><p>Academia.edu no longer supports Internet Explorer.</p><p>To browse Academia.edu and the wider internet faster and more securely, please take a few seconds to <a href="https://www.academia.edu/upgrade-browser">upgrade your browser</a>.</p></div><script>// Show this banner for all versions of IE if (!!window.MSInputMethodContext || /(MSIE)/.test(navigator.userAgent)) { document.getElementById('upgrade_ie_banner').style.display = 'block'; }</script> <div class="DesignSystem bootstrap ShrinkableNav"><div class="navbar navbar-default main-header"><div class="container-wrapper" id="main-header-container"><div class="container"><div class="navbar-header"><div class="nav-left-wrapper u-mt0x"><div class="nav-logo"><a data-main-header-link-target="logo_home" href="https://www.academia.edu/"><img class="visible-xs-inline-block" style="height: 24px;" alt="Academia.edu" src="//a.academia-assets.com/images/academia-logo-redesign-2015-A.svg" width="24" height="24" /><img width="145.2" height="18" class="hidden-xs" style="height: 24px;" alt="Academia.edu" src="//a.academia-assets.com/images/academia-logo-redesign-2015.svg" /></a></div><div class="nav-search"><div class="SiteSearch-wrapper select2-no-default-pills"><form class="js-SiteSearch-form DesignSystem" action="https://www.academia.edu/search" accept-charset="UTF-8" method="get"><i class="SiteSearch-icon fa fa-search u-fw700 u-positionAbsolute u-tcGrayDark"></i><input class="js-SiteSearch-form-input SiteSearch-form-input form-control" data-main-header-click-target="search_input" name="q" placeholder="Search" type="text" value="" /></form></div></div></div><div class="nav-right-wrapper pull-right"><ul class="NavLinks js-main-nav list-unstyled"><li class="NavLinks-link"><a class="js-header-login-url Button Button--inverseGray Button--sm u-mb4x" id="nav_log_in" rel="nofollow" href="https://www.academia.edu/login">Log In</a></li><li class="NavLinks-link u-p0x"><a class="Button Button--inverseGray Button--sm u-mb4x" rel="nofollow" href="https://www.academia.edu/signup">Sign Up</a></li></ul><button class="hidden-lg hidden-md hidden-sm u-ml4x navbar-toggle collapsed" data-target=".js-mobile-header-links" data-toggle="collapse" type="button"><span class="icon-bar"></span><span class="icon-bar"></span><span class="icon-bar"></span></button></div></div><div class="collapse navbar-collapse js-mobile-header-links"><ul class="nav navbar-nav"><li class="u-borderColorGrayLight u-borderBottom1"><a rel="nofollow" href="https://www.academia.edu/login">Log In</a></li><li class="u-borderColorGrayLight u-borderBottom1"><a rel="nofollow" href="https://www.academia.edu/signup">Sign Up</a></li><li class="u-borderColorGrayLight u-borderBottom1 js-mobile-nav-expand-trigger"><a href="#">more <span class="caret"></span></a></li><li><ul class="js-mobile-nav-expand-section nav navbar-nav u-m0x collapse"><li class="u-borderColorGrayLight u-borderBottom1"><a rel="false" href="https://www.academia.edu/about">About</a></li><li class="u-borderColorGrayLight u-borderBottom1"><a rel="nofollow" href="https://www.academia.edu/press">Press</a></li><li 
class="u-borderColorGrayLight u-borderBottom1"><a rel="false" href="https://www.academia.edu/documents">Papers</a></li><li class="u-borderColorGrayLight u-borderBottom1"><a rel="nofollow" href="https://www.academia.edu/terms">Terms</a></li><li class="u-borderColorGrayLight u-borderBottom1"><a rel="nofollow" href="https://www.academia.edu/privacy">Privacy</a></li><li class="u-borderColorGrayLight u-borderBottom1"><a rel="nofollow" href="https://www.academia.edu/copyright">Copyright</a></li><li class="u-borderColorGrayLight u-borderBottom1"><a rel="nofollow" href="https://www.academia.edu/hiring"><i class="fa fa-briefcase"></i> We're Hiring!</a></li><li class="u-borderColorGrayLight u-borderBottom1"><a rel="nofollow" href="https://support.academia.edu/hc/en-us"><i class="fa fa-question-circle"></i> Help Center</a></li><li class="js-mobile-nav-collapse-trigger u-borderColorGrayLight u-borderBottom1 dropup" style="display:none"><a href="#">less <span class="caret"></span></a></li></ul></li></ul></div></div></div><script>(function(){ var $moreLink = $(".js-mobile-nav-expand-trigger"); var $lessLink = $(".js-mobile-nav-collapse-trigger"); var $section = $('.js-mobile-nav-expand-section'); $moreLink.click(function(ev){ ev.preventDefault(); $moreLink.hide(); $lessLink.show(); $section.collapse('show'); }); $lessLink.click(function(ev){ ev.preventDefault(); $moreLink.show(); $lessLink.hide(); $section.collapse('hide'); }); })() if ($a.is_logged_in() || false) { new Aedu.NavigationController({ el: '.js-main-nav', showHighlightedNotification: false }); } else { $(".js-header-login-url").attr("href", $a.loginUrlWithRedirect()); } Aedu.autocompleteSearch = new AutocompleteSearch({el: '.js-SiteSearch-form'});</script></div></div> <div id='site' class='fixed'> <div id="content" class="clearfix"> <script>document.addEventListener('DOMContentLoaded', function(){ var $dismissible = $(".dismissible_banner"); $dismissible.click(function(ev) { $dismissible.hide(); }); });</script> <script src="//a.academia-assets.com/assets/webpack_bundles/profile.wjs-bundle-091a194a2533e53e1630c5cfd78813a4445aff73d16c70cdba1eafe8c0939f4a.js" defer="defer"></script><script>$viewedUser = Aedu.User.set_viewed( {"id":366407,"first_name":"Fabio","middle_initials":null,"last_name":"Cuzzolin","page_name":"FabioCuzzolin","domain_name":"oxfordbrookes","created_at":"2011-03-17T00:43:31.195-07:00","display_name":"Fabio Cuzzolin","url":"https://oxfordbrookes.academia.edu/FabioCuzzolin","photo":"https://0.academia-photos.com/366407/112374/61740579/s65_fabio.cuzzolin.jpg","has_photo":true,"department":{"id":2223049,"name":"School of Engineering, Computing and Mathematics","url":"https://oxfordbrookes.academia.edu/Departments/School_of_Engineering_Computing_and_Mathematics/Documents","university":{"id":45,"name":"Oxford Brookes University","url":"https://oxfordbrookes.academia.edu/"}},"position":"Faculty Member","position_id":1,"is_analytics_public":true,"interests":[{"id":6404,"name":"Reasoning about Uncertainty","url":"https://www.academia.edu/Documents/in/Reasoning_about_Uncertainty"},{"id":94224,"name":"Theory of Evidence","url":"https://www.academia.edu/Documents/in/Theory_of_Evidence"},{"id":5394,"name":"Fuzzy set theory","url":"https://www.academia.edu/Documents/in/Fuzzy_set_theory"},{"id":854,"name":"Computer Vision","url":"https://www.academia.edu/Documents/in/Computer_Vision"},{"id":6132,"name":"Soft Computing","url":"https://www.academia.edu/Documents/in/Soft_Computing"},{"id":5486,"name":"Clustering and Classification 
Methods","url":"https://www.academia.edu/Documents/in/Clustering_and_Classification_Methods"},{"id":16097,"name":"Decision Making Under Uncertainty","url":"https://www.academia.edu/Documents/in/Decision_Making_Under_Uncertainty"},{"id":51071,"name":"Nonmonotonic Reasoning","url":"https://www.academia.edu/Documents/in/Nonmonotonic_Reasoning"},{"id":2531,"name":"Belief Revision (Computer Science)","url":"https://www.academia.edu/Documents/in/Belief_Revision_Computer_Science_"},{"id":2008,"name":"Machine Learning","url":"https://www.academia.edu/Documents/in/Machine_Learning"},{"id":94225,"name":"Belief Functions","url":"https://www.academia.edu/Documents/in/Belief_Functions"},{"id":5436,"name":"Combinatorics","url":"https://www.academia.edu/Documents/in/Combinatorics"},{"id":16098,"name":"Modeling uncertainty; Fuzzy Clusters","url":"https://www.academia.edu/Documents/in/Modeling_uncertainty_Fuzzy_Clusters"},{"id":4095,"name":"Classification (Machine Learning)","url":"https://www.academia.edu/Documents/in/Classification_Machine_Learning_"},{"id":94223,"name":"Imprecise Probability","url":"https://www.academia.edu/Documents/in/Imprecise_Probability"},{"id":5109,"name":"Pattern Recognition","url":"https://www.academia.edu/Documents/in/Pattern_Recognition"},{"id":1185,"name":"Image Processing","url":"https://www.academia.edu/Documents/in/Image_Processing"},{"id":344,"name":"Probability Theory","url":"https://www.academia.edu/Documents/in/Probability_Theory"},{"id":22613,"name":"Probability and statistics","url":"https://www.academia.edu/Documents/in/Probability_and_statistics"},{"id":465,"name":"Artificial Intelligence","url":"https://www.academia.edu/Documents/in/Artificial_Intelligence"},{"id":17701,"name":"Gesture Recognition","url":"https://www.academia.edu/Documents/in/Gesture_Recognition"},{"id":90270,"name":"Action Recognition","url":"https://www.academia.edu/Documents/in/Action_Recognition"},{"id":58648,"name":"Human Gait Analysis","url":"https://www.academia.edu/Documents/in/Human_Gait_Analysis"},{"id":73500,"name":"Human Motion Analysis","url":"https://www.academia.edu/Documents/in/Human_Motion_Analysis"},{"id":25660,"name":"Decision Theory","url":"https://www.academia.edu/Documents/in/Decision_Theory"},{"id":757,"name":"Game Theory","url":"https://www.academia.edu/Documents/in/Game_Theory"},{"id":52867,"name":"Rationality","url":"https://www.academia.edu/Documents/in/Rationality"},{"id":61603,"name":"Uncertainty","url":"https://www.academia.edu/Documents/in/Uncertainty"},{"id":892,"name":"Statistics","url":"https://www.academia.edu/Documents/in/Statistics"},{"id":33069,"name":"Probability","url":"https://www.academia.edu/Documents/in/Probability"},{"id":31412,"name":"Probability and Mathematical Statistics","url":"https://www.academia.edu/Documents/in/Probability_and_Mathematical_Statistics"},{"id":4165,"name":"Fuzzy Logic","url":"https://www.academia.edu/Documents/in/Fuzzy_Logic"},{"id":31900,"name":"Fuzzy","url":"https://www.academia.edu/Documents/in/Fuzzy"},{"id":290552,"name":"Uncertainty analysis","url":"https://www.academia.edu/Documents/in/Uncertainty_analysis"},{"id":608598,"name":"Uncertainty Modeling","url":"https://www.academia.edu/Documents/in/Uncertainty_Modeling"},{"id":4060,"name":"Applied Statistics","url":"https://www.academia.edu/Documents/in/Applied_Statistics"},{"id":305,"name":"Applied Mathematics","url":"https://www.academia.edu/Documents/in/Applied_Mathematics"},{"id":265402,"name":"Applied Mathematics and 
Statistics","url":"https://www.academia.edu/Documents/in/Applied_Mathematics_and_Statistics"},{"id":21593,"name":"Artificial Inteligence","url":"https://www.academia.edu/Documents/in/Artificial_Inteligence"},{"id":141502,"name":"Inteligencia artificial","url":"https://www.academia.edu/Documents/in/Inteligencia_artificial-1"},{"id":1223686,"name":"Artificial Intelligent and Soft Computing Methodologies","url":"https://www.academia.edu/Documents/in/Artificial_Intelligent_and_Soft_Computing_Methodologies"},{"id":9898,"name":"Soft Computing, Image Processing and Robotics","url":"https://www.academia.edu/Documents/in/Soft_Computing_Image_Processing_and_Robotics"},{"id":4937,"name":"Theory of Mind","url":"https://www.academia.edu/Documents/in/Theory_of_Mind"},{"id":20053,"name":"Theory of Mind (Psychology)","url":"https://www.academia.edu/Documents/in/Theory_of_Mind_Psychology_"},{"id":142848,"name":"Theory of Mind (ToM) / Empathy / Emotion Recognition.","url":"https://www.academia.edu/Documents/in/Theory_of_Mind_ToM_Empathy_Emotion_Recognition"},{"id":422,"name":"Computer Science","url":"https://www.academia.edu/Documents/in/Computer_Science"},{"id":15665,"name":"Video Processing","url":"https://www.academia.edu/Documents/in/Video_Processing"},{"id":81182,"name":"Deep Learning","url":"https://www.academia.edu/Documents/in/Deep_Learning"},{"id":77,"name":"Robotics","url":"https://www.academia.edu/Documents/in/Robotics"},{"id":358,"name":"Convex Geometry","url":"https://www.academia.edu/Documents/in/Convex_Geometry"},{"id":300,"name":"Mathematics","url":"https://www.academia.edu/Documents/in/Mathematics"},{"id":2043,"name":"Mobile Robotics","url":"https://www.academia.edu/Documents/in/Mobile_Robotics"},{"id":471,"name":"Robotics (Computer Science)","url":"https://www.academia.edu/Documents/in/Robotics_Computer_Science_"},{"id":7728,"name":"Object Recognition (Computer Vision)","url":"https://www.academia.edu/Documents/in/Object_Recognition_Computer_Vision_"},{"id":674235,"name":"Combinatorics \u0026 Statistics","url":"https://www.academia.edu/Documents/in/Combinatorics_and_Statistics"},{"id":161,"name":"Neuroscience","url":"https://www.academia.edu/Documents/in/Neuroscience"},{"id":21548,"name":"Cognitive Neuroscience","url":"https://www.academia.edu/Documents/in/Cognitive_Neuroscience"},{"id":5451,"name":"Computational Neuroscience","url":"https://www.academia.edu/Documents/in/Computational_Neuroscience"},{"id":42630,"name":"Surgical Robotics","url":"https://www.academia.edu/Documents/in/Surgical_Robotics"},{"id":4892,"name":"Autonomous Robotics","url":"https://www.academia.edu/Documents/in/Autonomous_Robotics"},{"id":1762908,"name":"Self-Driving Cars","url":"https://www.academia.edu/Documents/in/Self-Driving_Cars"},{"id":84688,"name":"Autonomous Vehicles","url":"https://www.academia.edu/Documents/in/Autonomous_Vehicles"},{"id":204921,"name":"Autonomous driving","url":"https://www.academia.edu/Documents/in/Autonomous_driving"},{"id":111217,"name":"Scene Understanding","url":"https://www.academia.edu/Documents/in/Scene_Understanding"},{"id":26870,"name":"Image segmentation","url":"https://www.academia.edu/Documents/in/Image_segmentation"},{"id":42835,"name":"Video Analysis","url":"https://www.academia.edu/Documents/in/Video_Analysis"},{"id":1286170,"name":"Self-Supervised Learning","url":"https://www.academia.edu/Documents/in/Self-Supervised_Learning"}]} ); if ($a.is_logged_in() && $viewedUser.is_current_user()) { $('body').addClass('profile-viewed-by-owner'); } $socialProfiles = 
[{"id":137590,"link":"http://cms.brookes.ac.uk/staff/FabioCuzzolin/","name":"Fabio Cuzzolin at Brookes","link_domain":"cms.brookes.ac.uk","icon":"//www.google.com/s2/u/0/favicons?domain=cms.brookes.ac.uk"},{"id":137591,"link":"http://perception.inrialpes.fr/people/Cuzzolin/","name":"Fabio Cuzzolin at INRIA","link_domain":"perception.inrialpes.fr","icon":"//www.google.com/s2/u/0/favicons?domain=perception.inrialpes.fr"},{"id":137592,"link":"http://cms.brookes.ac.uk/staff/FabioCuzzolin/bibliography-evidence.html","name":"Belief Functions and Theory of Evidence - A bibliography","link_domain":"cms.brookes.ac.uk","icon":"//www.google.com/s2/u/0/favicons?domain=cms.brookes.ac.uk"},{"id":137593,"link":"http://www.bfasociety.org/","name":"Belief Functions and Applications Society","link_domain":"www.bfasociety.org","icon":"//www.google.com/s2/u/0/favicons?domain=www.bfasociety.org"},{"id":137594,"link":"http://cms.brookes.ac.uk/staff/FabioCuzzolin/refs-gait.html","name":"A Gait ID Review","link_domain":"cms.brookes.ac.uk","icon":"//www.google.com/s2/u/0/favicons?domain=cms.brookes.ac.uk"},{"id":137595,"link":"http://www.sipta.org/","name":"Society for Imprecise Probabilities","link_domain":"www.sipta.org","icon":"//www.google.com/s2/u/0/favicons?domain=www.sipta.org"},{"id":137596,"link":"http://www.linkedin.com/profile/view?id=96024872\u0026locale=en_US\u0026trk=tab_pro","name":"LinkedIn profile","link_domain":"www.linkedin.com","icon":"//www.google.com/s2/u/0/favicons?domain=www.linkedin.com"},{"id":137597,"link":"http://arnetminer.org/person/-1472154.html","name":"Arnetminer profile","link_domain":"arnetminer.org","icon":"//www.google.com/s2/u/0/favicons?domain=arnetminer.org"}]</script><div id="js-react-on-rails-context" style="display:none" data-rails-context="{"inMailer":false,"i18nLocale":"en","i18nDefaultLocale":"en","href":"https://oxfordbrookes.academia.edu/FabioCuzzolin","location":"/FabioCuzzolin","scheme":"https","host":"oxfordbrookes.academia.edu","port":null,"pathname":"/FabioCuzzolin","search":null,"httpAcceptLanguage":null,"serverSide":false}"></div> <div class="js-react-on-rails-component" style="display:none" data-component-name="ProfileCheckPaperUpdate" data-props="{}" data-trace="false" data-dom-id="ProfileCheckPaperUpdate-react-component-2208fd36-8db3-4b81-9877-153b135771ae"></div> <div id="ProfileCheckPaperUpdate-react-component-2208fd36-8db3-4b81-9877-153b135771ae"></div> <div class="DesignSystem"><div class="onsite-ping" id="onsite-ping"></div></div><div class="profile-user-info DesignSystem"><div class="social-profile-container"><div class="left-panel-container"><div class="user-info-component-wrapper"><div class="user-summary-cta-container"><div class="user-summary-container"><div class="social-profile-avatar-container"><img class="profile-avatar u-positionAbsolute" alt="Fabio Cuzzolin" border="0" onerror="if (this.src != '//a.academia-assets.com/images/s200_no_pic.png') this.src = '//a.academia-assets.com/images/s200_no_pic.png';" width="200" height="200" src="https://0.academia-photos.com/366407/112374/61740579/s200_fabio.cuzzolin.jpg" /></div><div class="title-container"><h1 class="ds2-5-heading-sans-serif-sm">Fabio Cuzzolin</h1><div class="affiliations-container fake-truncate js-profile-affiliations"><div><a class="u-tcGrayDarker" href="https://oxfordbrookes.academia.edu/">Oxford Brookes University</a>, <a class="u-tcGrayDarker" href="https://oxfordbrookes.academia.edu/Departments/School_of_Engineering_Computing_and_Mathematics/Documents">School of Engineering, 
Computing and Mathematics</a>, <span class="u-tcGrayDarker">Faculty Member</span></div><div><a class="u-tcGrayDarker" href="https://oxfordbrookes.academia.edu/">Oxford Brookes University</a>, <a class="u-tcGrayDarker" href="https://oxfordbrookes.academia.edu/Departments/Department_of_Computing_and_Communication_Technologies/Documents">Department of Computing and Communication Technologies</a>, <span class="u-tcGrayDarker">Faculty Member</span></div></div></div></div><div class="sidebar-cta-container"><button class="ds2-5-button hidden profile-cta-button grow js-profile-follow-button" data-broccoli-component="user-info.follow-button" data-click-track="profile-user-info-follow-button" data-follow-user-fname="Fabio" data-follow-user-id="366407" data-follow-user-source="profile_button" data-has-google="false"><span class="material-symbols-outlined" style="font-size: 20px" translate="no">add</span>Follow</button><button class="ds2-5-button hidden profile-cta-button grow js-profile-unfollow-button" data-broccoli-component="user-info.unfollow-button" data-click-track="profile-user-info-unfollow-button" data-unfollow-user-id="366407"><span class="material-symbols-outlined" style="font-size: 20px" translate="no">done</span>Following</button></div></div><div class="user-stats-container"><a><div class="stat-container js-profile-followers"><p class="label">Followers</p><p class="data">153,059</p></div></a><a><div class="stat-container js-profile-followees" data-broccoli-component="user-info.followees-count" data-click-track="profile-expand-user-info-following"><p class="label">Following</p><p class="data">230</p></div></a><a><div class="stat-container js-profile-coauthors" data-broccoli-component="user-info.coauthors-count" data-click-track="profile-expand-user-info-coauthors"><p class="label">Co-authors</p><p class="data">22</p></div></a><span><div class="stat-container"><p class="label"><span class="js-profile-total-view-text">Public Views</span></p><p class="data"><span class="js-profile-view-count"></span></p></div></span></div><div class="user-bio-container"><div class="profile-bio fake-truncate js-profile-about" style="margin: 0px;">Fabio Cuzzolin was born in Jesolo, Italy. <br />He graduated magna cum laude with the University of Padua, was awarded a Ph.D. there in 2001 for a thesis entitled “Visions of a generalized probability theory", and worked in world-class institutions such as the Washington University in St. Louis, Politecnico di Milano, the University of California at Los Angeles. In 2006 he was awarded a Marie Curie Fellowship with INRIA Rhone-Alpes, France. In 2007 he classified second there in the Senior Researcher national recruitment.<br />He has been at Brookes since 2008, took up a Senior Lectureship there in July'11, and a Readership in October 2011.<br />He has been nominated Subject Coordinator for the new Master's course in Computer Vision which will be launched by the Department in September 2013.<br />He has taken on the role of Head of the Artificial Intelligence and Vision research group in September 2012, and has been awarded in October 2012 a Next 10 award by the Faculty of Technology, Design and Environment as one of its top emerging researchers.<br />He is currently supervising two Ph.D. students, an EPSRC-funded postdoc, two visiting students from Turkey and Italy. Two more Ph.D. students will join his group in 2014. <br /><br />Dr Cuzzolin is a world expert in uncertainty theory and belief functions theory. 
He has worked extensively on the mathematical foundations of belief calculus. His main contribution is his geometric approach to uncertainty measures, in which uncertainty measures are represented as points of a Cartesian space and analyzed there. His work in the field is being published in two separate monographs, by Springer-Verlag ("The geometry of uncertainty") and Lambert Academic Publishing ("Visions of a generalized probability theory").
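To make the geometric picture concrete, here is a minimal sketch (illustrative only, not code from the monographs): on a finite frame of discernment, the belief values Bel(A), one per non-empty proper subset A, can be read off a mass function, so that each belief function becomes a point with those coordinates in a Cartesian space.

```python
from itertools import combinations

def subsets(frame):
    """All non-empty proper subsets of a finite frame of discernment."""
    frame = tuple(frame)
    for r in range(1, len(frame)):
        yield from (frozenset(c) for c in combinations(frame, r))

def belief_point(mass, frame):
    """Map a mass function (dict: frozenset -> weight) to the coordinates
    of the corresponding belief function, one coordinate per non-empty
    proper subset A: Bel(A) = sum of m(B) over all B contained in A."""
    return {A: sum(w for B, w in mass.items() if B <= A) for A in subsets(frame)}

# Example on the binary frame {a, b}: a belief function is a point
# (Bel({a}), Bel({b})) in the plane; Bayesian belief functions
# (probabilities) lie on the segment Bel({a}) + Bel({b}) = 1.
frame = {"a", "b"}
m = {frozenset({"a"}): 0.3, frozenset({"b"}): 0.2, frozenset({"a", "b"}): 0.5}
print(belief_point(m, frame))   # Bel({a}) = 0.3, Bel({b}) = 0.2
```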
He is also well known for his work in computer vision, mainly machine learning for human motion analysis, including tensorial models for identity recognition, metric learning for action recognition, and spectral techniques for articulated object segmentation and matching.

He is the author of some 90 peer-reviewed publications, published or under review, including two monographs, an edited Springer volume, 3 book chapters, 14 journal papers (plus 8 under review) and 9 chapters in collections.
He won the Best Paper award at PRICAI'08, the Poster Prize at the ISIPTA'11 Symposium on Imprecise Probabilities and the Best Poster award at the 2012 INRIA Summer School on Machine Learning and Visual Recognition, and was short-listed for prizes at the ECSQARU'11 and BMVC 2012 conferences, where he received the Outstanding Reviewer Award.

Dr Cuzzolin is Associate Editor of the IEEE Transactions on Fuzzy Systems and Guest Editor for the International Journal of Approximate Reasoning; he has been Associate Editor for the IEEE Transactions on Systems, Man, and Cybernetics - Part C and Guest Editor for Information Fusion, and reviews for many other journals in both computer vision and imprecise probabilities, among them Artificial Intelligence, the IEEE Transactions on Systems, Man, and Cybernetics - Part B, the IEEE Transactions on Fuzzy Systems, Computer Vision and Image Understanding, Information Sciences, the Journal of Risk and Reliability, the International Journal on Uncertainty, Fuzziness and Knowledge-Based Systems, Image and Vision Computing, and the Annals of Operations Research.

Dr Cuzzolin has served on the technical program committees of around 50 international conferences, including BMVC, IPMU and SMC, and is a Senior Program Committee member of Uncertainty in Artificial Intelligence (UAI).
He was Program Chair and local organizer of the 3rd International Conference on the Theory of Belief Functions (BELIEF 2014), held at St. Hugh's College, Oxford, UK.

Phone: +44 (0)1865 484526
Address: Department of Computing and Communication Technologies
Faculty of Technology, Design and Environment
Oxford Brookes University
Wheatley campus, OX33 1HX, Oxford, UK

Interests (43 in total): Reasoning about Uncertainty, Theory of Evidence, Fuzzy set theory, Computer Vision, Clustering and Classification Methods, and others.

Uploads: 2 Videos, 316 Papers, 9 Tutorials and Presentations, 4 Books and Monographs, 6 Talks, 8 Drafts

Videos by Fabio Cuzzolin
src="https://academia-edu-videos.s3.amazonaws.com/transcoded/k9yK7k/thumbnail.jpg?response-content-disposition=inline%3B%20filename%3D%22thumbnail.jpg%22%3B%20filename%2A%3DUTF-8%27%27thumbnail.jpg&response-content-type=image%2Fjpeg&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=ASIATUSBJ6BABRSCM5TP%2F20250331%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20250331T113250Z&X-Amz-Expires=20746&X-Amz-Security-Token=IQoJb3JpZ2luX2VjEDwaCXVzLWVhc3QtMSJIMEYCIQDmRsdN07nC2cE4zkwv6jUkQvgUenh0%2FGtLen0eHwPylwIhAJWpwsqdFLPRzGmfwcpj6mWfz8o3wD5HlPVnQ5FwqbsbKpYECKT%2F%2F%2F%2F%2F%2F%2F%2F%2F%2FwEQABoMMjUwMzE4ODExMjAwIgyWRw%2F7h9PNgASRUQoq6gM7aq%2B%2FZhugiLodDyBK9fLzytW6f5C6Ko%2B9m267gmOZdWFqKMCugNyf1E4Yp3We0ZxlEZtmusBqcyAW4mk3iKwUqu9uEzuz9uBpr86O%2BYMqQIPTPwQlQaiKw%2BieCoLx3Ll11vADuZ%2Bx4m652TQVTGWM30Tq%2BP1ulxRpIDhMfnecCtVdJxizxE4x%2F5yvUqB9Yo0uAhX5sII1uqUCeLAmVdh1EyQo5H911N9eQTwsnteSKEedeo6N%2Fk8HgFREBt8ns0iD1TOJF1oDGigLGJN%2B7897ZM53DzQZzMINbSsde%2FIpNlgUmW1oONsJsskAyIpr1ngMIP%2BkZd6HGnPkUkwkS7gI1gSEuKdjerEq0LOIT3kEnEsjzCv7AAMxcjgqGyJC7HjoIOVSG%2FlLj66G%2Fyr3iSNbl%2FwgQz4cURfUg4AvmKO5xsdosqem6upFxkR7Artmyatp%2BCj2llT1BZ40yPH6MaM1%2B7XxxDNIdkm9yGlHW4Cfua%2BtZHk1%2FM%2Fz%2FbPn7kOkQHgi%2FdnrAdijahqtANp0b8bNw7ITFOaSX7zq0AnO6VJncuagyL7oRmvOc%2Be5HeoxyCPqg02%2Fzi%2Bc9fG9ityH36fVcUVuZOZu6n8dRSJEaE752BOqXtxf3z5vM1VxRjoCgu%2BTak7YLiDB1VuDMIz0qb8GOqQBT0hSRbUCp%2BieUnSgQ4VSt4G4OlKTFmz%2B3%2BdBpDbGWWGa87C3VyRyWd9UEiqt3mil%2Bm865z03aorT1JTe64zIBA468A4pvqXnsYTctihP3nhZ4CQSod8KB7jGdeE6rIrxaudtmnzHvLcLdJyLqfZRu4FhK3i1ebhecFj4bOWcTIMLidmXf6hsfS3p1%2FElXZXdMguzjWLoCbpPc9LIyEsA8B7pOEk%3D&X-Amz-SignedHeaders=host&X-Amz-Signature=4d73e33888bf11b9f283e4f73b164bfa8e3bb81b5acb8742bad9da7a2c619f18" /><img alt="Play" class="play-icon" src="//a.academia-assets.com/images/video-play-icon.svg" /><div class="video-duration">44:27</div></div></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" href="https://www.academia.edu/video/k9yK7k">The ROAD event awareness dataset for autonomous driving</a></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">Invited talk @ DeepView: ``Global Multi-Target Visual Surveillance Based on Real-Time Large-Scale...</span><a class="js-work-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">Invited talk @ DeepView: ``Global Multi-Target Visual Surveillance Based on Real-Time Large-Scale Analysis", AVSS 2021, Nov 16 2021 <a href="https://sites.google.com/view/deepview2021/" rel="nofollow">https://sites.google.com/view/deepview2021/</a> <br /> <br />Autonomous vehicles (AVs) employ a variety of sensors to identify roadside infrastructure and other road users, with much of the existing work focusing on scene understanding and robust object detection. Human drivers, however, approach the driving task in a more holistic fashion which entails, in particular, recognising and understanding the evolution of road events. Testing an AV’s capability to recognise the actions undertaken by other road agents is thus crucial to improve their situational awareness and facilitate decision making. <br />In this talk we introduce the ROad event Awareness Dataset (ROAD) for Autonomous Driving, to our knowledge the first of its kind. 
ROAD is explicitly designed to test the ability of an autonomous vehicle to detect road events.</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-video-id="k9yK7k"><a class="js-profile-work-strip-edit-button" href="https://oxfordbrookes.academia.edu/video/edit/k9yK7k" rel="nofollow" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-video-id="k9yK7k">28 views</span></span></span></div></div></div><style type="text/css">/*thumbnail*/ .video-thumbnail-container { position: relative; height: 88px !important; box-sizing: content-box; } .thumbnail-image { height: 100%; width: 100%; object-fit: cover; } .play-icon { position: absolute; width: 40px; height: 40px; top: calc(50% - 20px); left: calc(50% - 20px); } .video-duration { position: absolute; bottom: 2px; right: 2px; color: #ffffff; background-color: #000000; font-size: 12px; font-weight: 500; line-height: 12px; padding: 2px; }</style><div class="js-work-strip profile--work_container" data-video-id="24563"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" href="https://www.academia.edu/video/jYADbj"><div class="work-thumbnail video-thumbnail-container"><img class="thumbnail-image" onerror="this.src='//a.academia-assets.com/images/videoicon.svg'" src="https://academia-edu-videos.s3.amazonaws.com/transcoded/jYADbj/thumbnail.jpg?response-content-disposition=inline%3B%20filename%3D%22thumbnail.jpg%22%3B%20filename%2A%3DUTF-8%27%27thumbnail.jpg&response-content-type=image%2Fjpeg&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=ASIATUSBJ6BABRSCM5TP%2F20250331%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20250331T113250Z&X-Amz-Expires=20746&X-Amz-Security-Token=IQoJb3JpZ2luX2VjEDwaCXVzLWVhc3QtMSJIMEYCIQDmRsdN07nC2cE4zkwv6jUkQvgUenh0%2FGtLen0eHwPylwIhAJWpwsqdFLPRzGmfwcpj6mWfz8o3wD5HlPVnQ5FwqbsbKpYECKT%2F%2F%2F%2F%2F%2F%2F%2F%2F%2FwEQABoMMjUwMzE4ODExMjAwIgyWRw%2F7h9PNgASRUQoq6gM7aq%2B%2FZhugiLodDyBK9fLzytW6f5C6Ko%2B9m267gmOZdWFqKMCugNyf1E4Yp3We0ZxlEZtmusBqcyAW4mk3iKwUqu9uEzuz9uBpr86O%2BYMqQIPTPwQlQaiKw%2BieCoLx3Ll11vADuZ%2Bx4m652TQVTGWM30Tq%2BP1ulxRpIDhMfnecCtVdJxizxE4x%2F5yvUqB9Yo0uAhX5sII1uqUCeLAmVdh1EyQo5H911N9eQTwsnteSKEedeo6N%2Fk8HgFREBt8ns0iD1TOJF1oDGigLGJN%2B7897ZM53DzQZzMINbSsde%2FIpNlgUmW1oONsJsskAyIpr1ngMIP%2BkZd6HGnPkUkwkS7gI1gSEuKdjerEq0LOIT3kEnEsjzCv7AAMxcjgqGyJC7HjoIOVSG%2FlLj66G%2Fyr3iSNbl%2FwgQz4cURfUg4AvmKO5xsdosqem6upFxkR7Artmyatp%2BCj2llT1BZ40yPH6MaM1%2B7XxxDNIdkm9yGlHW4Cfua%2BtZHk1%2FM%2Fz%2FbPn7kOkQHgi%2FdnrAdijahqtANp0b8bNw7ITFOaSX7zq0AnO6VJncuagyL7oRmvOc%2Be5HeoxyCPqg02%2Fzi%2Bc9fG9ityH36fVcUVuZOZu6n8dRSJEaE752BOqXtxf3z5vM1VxRjoCgu%2BTak7YLiDB1VuDMIz0qb8GOqQBT0hSRbUCp%2BieUnSgQ4VSt4G4OlKTFmz%2B3%2BdBpDbGWWGa87C3VyRyWd9UEiqt3mil%2Bm865z03aorT1JTe64zIBA468A4pvqXnsYTctihP3nhZ4CQSod8KB7jGdeE6rIrxaudtmnzHvLcLdJyLqfZRu4FhK3i1ebhecFj4bOWcTIMLidmXf6hsfS3p1%2FElXZXdMguzjWLoCbpPc9LIyEsA8B7pOEk%3D&X-Amz-SignedHeaders=host&X-Amz-Signature=4186090485480959bdb124c4a74877ee5d0f3c6cb98504ca6c1989b84017db21" /><img alt="Play" class="play-icon" src="//a.academia-assets.com/images/video-play-icon.svg" /><div class="video-duration">01:24:32</div></div></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item 
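Purely as an illustration of what "detecting a road event" might involve (a hypothetical sketch; the actual annotation schema is defined in the ROAD dataset's documentation, not here): one can think of an event as an agent performing an action at a location over a span of video frames.

```python
from dataclasses import dataclass, field

@dataclass
class RoadEvent:
    """Hypothetical container for a road event annotation: an agent
    performing an action at a location over a span of video frames.
    Field names are illustrative, not the ROAD schema."""
    agent: str                     # e.g. "pedestrian", "cyclist", "car"
    action: str                    # e.g. "crossing", "turning-right"
    location: str                  # e.g. "at-junction", "on-pavement"
    start_frame: int = 0
    end_frame: int = 0
    boxes: list = field(default_factory=list)  # per-frame bounding boxes

# A detector would be scored on recovering tuples like this one.
event = RoadEvent("pedestrian", "crossing", "at-junction", 120, 168)
print(event)
```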
wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" href="https://www.academia.edu/video/jYADbj">Belief functions: past, present and future</a></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">Invited seminar, Department of Statistics, Harvard University, 2016 The theory of belief funct...</span><a class="js-work-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">Invited seminar, Department of Statistics, Harvard University, 2016 <br /> <br />The theory of belief functions, sometimes referred to as evidence theory or Dempster-Shafer theory, was first introduced by Arthur P. Dempster in the context of statistical inference, to be later developed by Glenn Shafer as a general framework for modelling epistemic uncertainty. The methodology is now well established as a general framework for reasoning with uncertainty, with well-understood connections to related frameworks such as probability, possibility, random set and imprecise probability theories. <br /> <br />This talk aims at bridging the gap between researchers in the field and the wider AI and Uncertainty Theory community, with the longer term goal of a more fruitful collaboration and dissemination of ideas.</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-video-id="jYADbj"><a class="js-profile-work-strip-edit-button" href="https://oxfordbrookes.academia.edu/video/edit/jYADbj" rel="nofollow" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-video-id="jYADbj">44 views</span></span></span></div></div></div><div class="profile--tab_heading_container js-section-heading" data-section="Papers" id="Papers"><h3 class="profile--tab_heading_container">Papers by Fabio Cuzzolin</h3></div><div class="js-work-strip profile--work_container" data-work-id="127272558"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" rel="nofollow" href="https://www.academia.edu/127272558/Epistemic_Artificial_Intelligence_Using_random_sets_to_quantify_uncertainty_in_machine_learning"><img alt="Research paper thumbnail of Epistemic Artificial Intelligence: Using random sets to quantify uncertainty in machine learning" class="work-thumbnail" src="https://a.academia-assets.com/images/blank-paper.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title">Epistemic Artificial Intelligence: Using random sets to quantify uncertainty in machine learning</div><div class="wp-workCard_item"><span>Data Science for Econometrics and Related Topics</span><span>, 2025</span></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">Quantifying uncertainty is fundamental in machine learning tasks, including classification, detec...</span><a class="js-work-more-abstract" data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated 
hidden">Quantifying uncertainty is fundamental in machine learning tasks, including classification, detection in complex domains such as computer vision (CV) and text generation in large language models (LLMs). This is especially crucial when artificial intelligence (AI) is used in safety-critical applications, such as, e.g., autonomous driving or medical diagnosis, where reliable decisions are crucial to prevent serious consequences. The Epistemic AI project explores the use of random sets for quantifying epistemic uncertainty in AI. A mathematical framework which generalizes the concept of random variables to sets, random sets enable a more flexible and expressive approach to uncertainty modeling. This work proposes ways to employ the random sets formalism to model classification uncertainty over both the target and parameter spaces of a machine learning model (e.g., a neural network), as well as detection uncertainty, within the context of computer vision. The applicability and effectiveness of random sets is also demonstrated in large language models, where they can be utilized to model uncertainty in natural language processing tasks. We show how, by leveraging random set theory, machine learning models can achieve enhanced robustness,<br />interpretability, and reliability while effectively modelling uncertainty.</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="127272558"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="127272558"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 127272558; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=127272558]").text(description); $(".js-view-count[data-work-id=127272558]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 127272558; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='127272558']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-a9bf3a2bc8c89fa2a77156577594264ee8a0f214d74241bc0fcd3f69f8d107ac.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (false){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "-1" } } 
We show how, by leveraging random set theory, machine learning models can achieve enhanced robustness,\ninterpretability, and reliability while effectively modelling uncertainty.","owner":{"id":366407,"first_name":"Fabio","middle_initials":null,"last_name":"Cuzzolin","page_name":"FabioCuzzolin","domain_name":"oxfordbrookes","created_at":"2011-03-17T00:43:31.195-07:00","display_name":"Fabio Cuzzolin","url":"https://oxfordbrookes.academia.edu/FabioCuzzolin"},"attachments":[],"research_interests":[{"id":422,"name":"Computer Science","url":"https://www.academia.edu/Documents/in/Computer_Science"},{"id":465,"name":"Artificial Intelligence","url":"https://www.academia.edu/Documents/in/Artificial_Intelligence"},{"id":815,"name":"Epistemology","url":"https://www.academia.edu/Documents/in/Epistemology"},{"id":2008,"name":"Machine Learning","url":"https://www.academia.edu/Documents/in/Machine_Learning"},{"id":11598,"name":"Neural Networks","url":"https://www.academia.edu/Documents/in/Neural_Networks"},{"id":16097,"name":"Decision Making Under Uncertainty","url":"https://www.academia.edu/Documents/in/Decision_Making_Under_Uncertainty"},{"id":31477,"name":"Uncertainty Quantification","url":"https://www.academia.edu/Documents/in/Uncertainty_Quantification"},{"id":41239,"name":"Bayesian statistics \u0026 modelling","url":"https://www.academia.edu/Documents/in/Bayesian_statistics_and_modelling"},{"id":54123,"name":"Artificial Neural Networks","url":"https://www.academia.edu/Documents/in/Artificial_Neural_Networks"},{"id":61603,"name":"Uncertainty","url":"https://www.academia.edu/Documents/in/Uncertainty"},{"id":81182,"name":"Deep Learning","url":"https://www.academia.edu/Documents/in/Deep_Learning"}],"urls":[]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") if (false) { Aedu.setUpFigureCarousel('profile-work-127272558-figures'); } }); </script> <div class="js-work-strip profile--work_container" data-work-id="127272448"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/127272448/EPISTEMIC_ARTIFICIAL_INTELLIGENCE_Using_random_sets_to_quantify_uncertainty_in_machine_learning"><img alt="Research paper thumbnail of EPISTEMIC ARTIFICIAL INTELLIGENCE: Using random sets to quantify uncertainty in machine learning" class="work-thumbnail" src="https://attachments.academia-assets.com/121029150/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/127272448/EPISTEMIC_ARTIFICIAL_INTELLIGENCE_Using_random_sets_to_quantify_uncertainty_in_machine_learning">EPISTEMIC ARTIFICIAL INTELLIGENCE: Using random sets to quantify uncertainty in machine learning</a></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">Quantifying uncertainty is fundamental in machine learning tasks, including classification, detec...</span><a class="js-work-more-abstract" data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">Quantifying uncertainty is fundamental in machine learning tasks, including classification, detection 
in complex domains such as computer vision (CV) and text generation in large language models (LLMs). This is especially crucial when artificial intelligence (AI) is used in safety-critical applications, such as, e.g., autonomous driving or medical diagnosis, where reliable decisions are crucial to prevent serious consequences. The Epistemic AI project explores the use of random sets for quantifying epistemic uncertainty in AI. A mathematical framework which generalizes the concept of random variables to sets, random sets enable a more flexible and expressive approach to uncertainty modeling. This work proposes ways to employ the random sets formalism to model classification uncertainty over both the target and parameter spaces of a machine learning model (e.g., a neural network), as well as detection uncertainty, within the context of computer vision. The applicability and effectiveness of random sets is also demonstrated in large language models, where they can be utilized to model uncertainty in natural language processing tasks. We show how, by leveraging random set theory, machine learning models can achieve enhanced robustness, interpretability, and reliability while effectively modelling uncertainty.</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="e2f76d8380fa7a8a090f927e7651b266" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":121029150,"asset_id":127272448,"asset_type":"Work","button_location":"profile"}" href="https://www.academia.edu/attachments/121029150/download_file?s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="127272448"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="127272448"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 127272448; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=127272448]").text(description); $(".js-view-count[data-work-id=127272448]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 127272448; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='127272448']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-a9bf3a2bc8c89fa2a77156577594264ee8a0f214d74241bc0fcd3f69f8d107ac.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from 
CreINNs: Credal-Set Interval Neural Networks for Uncertainty Estimation in Classification Tasks
by Fabio Cuzzolin and Keivan Sh
Machine Learning
data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">Uncertainty estimation is increasingly attractive for improving the reliability of neural networks. In this work, we present novel credal-set interval neural networks (CreINNs) designed for classification tasks. CreINNs preserve the traditional interval neural network structure, capturing weight uncertainty through deterministic intervals, while forecasting credal sets using the mathematical framework of probability intervals. Experimental validations on an out-of-distribution detection benchmark (CIFAR10 vs SVHN) showcase that CreINNs outperform epistemic uncertainty estimation when compared to variational Bayesian neural networks (BNNs) and deep ensembles (DEs). Furthermore, CreINNs exhibit a notable reduction in computational complexity compared to variational BNNs and demonstrate smaller model sizes than DEs.</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="84398b4e39a9cf3140787369d21d82e2" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":121029070,"asset_id":127272380,"asset_type":"Work","button_location":"profile"}" href="https://www.academia.edu/attachments/121029070/download_file?s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="127272380"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="127272380"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 127272380; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=127272380]").text(description); $(".js-view-count[data-work-id=127272380]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 127272380; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='127272380']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-a9bf3a2bc8c89fa2a77156577594264ee8a0f214d74241bc0fcd3f69f8d107ac.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (true){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, 
downloadLinkId: "84398b4e39a9cf3140787369d21d82e2" } } $('.js-work-strip[data-work-id=127272380]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":127272380,"title":"CreINNs: Credal-Set Interval Neural Networks for Uncertainty Estimation in Classification Tasks","translated_title":"","metadata":{"abstract":"Uncertainty estimation is increasingly attractive for improving the reliability of neural networks. In this work, we present novel credal-set interval neural networks (CreINNs) designed for classification tasks. CreINNs preserve the traditional interval neural network structure, capturing weight uncertainty through deterministic intervals, while forecasting credal sets using the mathematical framework of probability intervals. Experimental validations on an out-of-distribution detection benchmark (CIFAR10 vs SVHN) showcase that CreINNs outperform epistemic uncertainty estimation when compared to variational Bayesian neural networks (BNNs) and deep ensembles (DEs). Furthermore, CreINNs exhibit a notable reduction in computational complexity compared to variational BNNs and demonstrate smaller model sizes than DEs.","ai_title_tag":"Credal-Set Interval Neural Networks for Uncertainty","publication_name":"Machine Learning"},"translated_abstract":"Uncertainty estimation is increasingly attractive for improving the reliability of neural networks. In this work, we present novel credal-set interval neural networks (CreINNs) designed for classification tasks. CreINNs preserve the traditional interval neural network structure, capturing weight uncertainty through deterministic intervals, while forecasting credal sets using the mathematical framework of probability intervals. Experimental validations on an out-of-distribution detection benchmark (CIFAR10 vs SVHN) showcase that CreINNs outperform epistemic uncertainty estimation when compared to variational Bayesian neural networks (BNNs) and deep ensembles (DEs). 
Furthermore, CreINNs exhibit a notable reduction in computational complexity compared to variational BNNs and demonstrate smaller model sizes than DEs.","internal_url":"https://www.academia.edu/127272380/CreINNs_Credal_Set_Interval_Neural_Networks_for_Uncertainty_Estimation_in_Classification_Tasks","translated_internal_url":"","created_at":"2025-01-26T07:43:04.675-08:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":366407,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[{"id":43025074,"work_id":127272380,"tagging_user_id":366407,"tagged_user_id":null,"co_author_invite_id":8053052,"email":"k***g@kuleuven.be","display_order":1,"name":"Kaizheng Wang","title":"CreINNs: Credal-Set Interval Neural Networks for Uncertainty Estimation in Classification Tasks"},{"id":43025075,"work_id":127272380,"tagging_user_id":366407,"tagged_user_id":null,"co_author_invite_id":8053053,"email":"k***r@kuleuven.be","display_order":2,"name":"Keivan Shariatmadar","title":"CreINNs: Credal-Set Interval Neural Networks for Uncertainty Estimation in Classification Tasks"},{"id":43025076,"work_id":127272380,"tagging_user_id":366407,"tagged_user_id":null,"co_author_invite_id":8053054,"email":"d***s@kuleuven.be","display_order":4,"name":"David Moens","title":"CreINNs: Credal-Set Interval Neural Networks for Uncertainty Estimation in Classification Tasks"},{"id":43025077,"work_id":127272380,"tagging_user_id":366407,"tagged_user_id":null,"co_author_invite_id":4480510,"email":"h***z@kuleuven.be","display_order":5,"name":"Hans Hallez","title":"CreINNs: Credal-Set Interval Neural Networks for Uncertainty Estimation in Classification Tasks"},{"id":43025078,"work_id":127272380,"tagging_user_id":366407,"tagged_user_id":538842,"co_author_invite_id":null,"email":"k***t@gmail.com","affiliation":"Ghent University","display_order":6,"name":"Keivan Sh","title":"CreINNs: Credal-Set Interval Neural Networks for Uncertainty Estimation in Classification Tasks"}],"downloadable_attachments":[{"id":121029070,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/121029070/thumbnails/1.jpg","file_name":"2401.05043.pdf","download_url":"https://www.academia.edu/attachments/121029070/download_file","bulk_download_file_name":"CreINNs_Credal_Set_Interval_Neural_Netwo.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/121029070/2401.05043-libre.pdf?1737909166=\u0026response-content-disposition=attachment%3B+filename%3DCreINNs_Credal_Set_Interval_Neural_Netwo.pdf\u0026Expires=1743344601\u0026Signature=ZNQZ~CITPyzRuvgVCWZNV5PEdoI2rwi5IQW4~NjzL0JnUmngk8YnWV3LrnNeZdjQ0fbfz14atGC4FQ5M-3L36E408iendC~14WCm87iBWWHInF3SF6698kfVCUBuVkwauu~HnUjrKxhjtlKERXtdxhI9FTwzrtFJJHrLkfuOTpMuFBNbsUucn~IZFeKwMyabWVzYYLmfnEREzVypYqc9lX6jjSvVecn4icCs0lE-b6QClRAzrr4MRRrJV3h78NjHmL09FeyyGJnpGua3Hinmk64muwLGNRpSOtQhAXuO7YfKVcbUUs1GnEHoCSSLrFeoC~Gh-p-K1CjqwxOBav4Myg__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"slug":"CreINNs_Credal_Set_Interval_Neural_Networks_for_Uncertainty_Estimation_in_Classification_Tasks","translated_slug":"","page_count":11,"language":"en","content_type":"Work","summary":"Uncertainty estimation is increasingly attractive for improving the reliability of neural networks. In this work, we present novel credal-set interval neural networks (CreINNs) designed for classification tasks. 
CreINNs preserve the traditional interval neural network structure, capturing weight uncertainty through deterministic intervals, while forecasting credal sets using the mathematical framework of probability intervals. Experimental validations on an out-of-distribution detection benchmark (CIFAR10 vs SVHN) showcase that CreINNs outperform epistemic uncertainty estimation when compared to variational Bayesian neural networks (BNNs) and deep ensembles (DEs). Furthermore, CreINNs exhibit a notable reduction in computational complexity compared to variational BNNs and demonstrate smaller model sizes than DEs.","owner":{"id":366407,"first_name":"Fabio","middle_initials":null,"last_name":"Cuzzolin","page_name":"FabioCuzzolin","domain_name":"oxfordbrookes","created_at":"2011-03-17T00:43:31.195-07:00","display_name":"Fabio Cuzzolin","url":"https://oxfordbrookes.academia.edu/FabioCuzzolin"},"attachments":[{"id":121029070,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/121029070/thumbnails/1.jpg","file_name":"2401.05043.pdf","download_url":"https://www.academia.edu/attachments/121029070/download_file","bulk_download_file_name":"CreINNs_Credal_Set_Interval_Neural_Netwo.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/121029070/2401.05043-libre.pdf?1737909166=\u0026response-content-disposition=attachment%3B+filename%3DCreINNs_Credal_Set_Interval_Neural_Netwo.pdf\u0026Expires=1743344601\u0026Signature=ZNQZ~CITPyzRuvgVCWZNV5PEdoI2rwi5IQW4~NjzL0JnUmngk8YnWV3LrnNeZdjQ0fbfz14atGC4FQ5M-3L36E408iendC~14WCm87iBWWHInF3SF6698kfVCUBuVkwauu~HnUjrKxhjtlKERXtdxhI9FTwzrtFJJHrLkfuOTpMuFBNbsUucn~IZFeKwMyabWVzYYLmfnEREzVypYqc9lX6jjSvVecn4icCs0lE-b6QClRAzrr4MRRrJV3h78NjHmL09FeyyGJnpGua3Hinmk64muwLGNRpSOtQhAXuO7YfKVcbUUs1GnEHoCSSLrFeoC~Gh-p-K1CjqwxOBav4Myg__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"research_interests":[{"id":465,"name":"Artificial Intelligence","url":"https://www.academia.edu/Documents/in/Artificial_Intelligence"},{"id":2008,"name":"Machine Learning","url":"https://www.academia.edu/Documents/in/Machine_Learning"},{"id":11598,"name":"Neural Networks","url":"https://www.academia.edu/Documents/in/Neural_Networks"},{"id":15084,"name":"Statistical machine learning","url":"https://www.academia.edu/Documents/in/Statistical_machine_learning"},{"id":16097,"name":"Decision Making Under Uncertainty","url":"https://www.academia.edu/Documents/in/Decision_Making_Under_Uncertainty"},{"id":22613,"name":"Probability and statistics","url":"https://www.academia.edu/Documents/in/Probability_and_statistics"},{"id":31477,"name":"Uncertainty Quantification","url":"https://www.academia.edu/Documents/in/Uncertainty_Quantification"},{"id":41815,"name":"Applied Probability","url":"https://www.academia.edu/Documents/in/Applied_Probability"},{"id":54123,"name":"Artificial Neural Networks","url":"https://www.academia.edu/Documents/in/Artificial_Neural_Networks"},{"id":61603,"name":"Uncertainty","url":"https://www.academia.edu/Documents/in/Uncertainty"},{"id":81182,"name":"Deep Learning","url":"https://www.academia.edu/Documents/in/Deep_Learning"},{"id":177212,"name":"Interval analysis","url":"https://www.academia.edu/Documents/in/Interval_analysis"}],"urls":[]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") if (false) { Aedu.setUpFigureCarousel('profile-work-127272380-figures'); } }); </script> <div class="js-work-strip profile--work_container" data-work-id="127272278"><div 
class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/127272278/Analyzing_the_Feasibility_of_Achieving_Machine_Theory_of_Mind_through_ToMnet_like_Approaches"><img alt="Research paper thumbnail of Analyzing the Feasibility of Achieving Machine Theory of Mind through ToMnet-like Approaches" class="work-thumbnail" src="https://attachments.academia-assets.com/121029000/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/127272278/Analyzing_the_Feasibility_of_Achieving_Machine_Theory_of_Mind_through_ToMnet_like_Approaches">Analyzing the Feasibility of Achieving Machine Theory of Mind through ToMnet-like Approaches</a></div><div class="wp-workCard_item"><span>Workshop on Advancing Artificial Intelligence through Theory of Mind (ToM4AI)</span><span>, 2025</span></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">This paper critically examines the efficacy of the ToMnet family of models in achieving a genuine...</span><a class="js-work-more-abstract" data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">This paper critically examines the efficacy of the ToMnet family of models in achieving a genuine Machine Theory of Mind (ToM). We introduce ToMnet-N, an open-source evolution of ToMnet+, developed to replicate and extend previous experiments while addressing architectural limitations. Our analysis reveals that ToMnet-N's success in predictive tasks stems primarily from pattern recognition within training data rather than true mental state understanding. These findings question the validity of ToMnet-like approaches in modeling authentic ToM capabilities and suggest that a paradigm shift may be necessary. 
This work contributes to the field of Computational ToM by providing a comprehensive evaluation and proposing directions for future research.</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="e65279cddcac522951f0ae9c9d1a9333" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":121029000,"asset_id":127272278,"asset_type":"Work","button_location":"profile"}" href="https://www.academia.edu/attachments/121029000/download_file?s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="127272278"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="127272278"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 127272278; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=127272278]").text(description); $(".js-view-count[data-work-id=127272278]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 127272278; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='127272278']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-a9bf3a2bc8c89fa2a77156577594264ee8a0f214d74241bc0fcd3f69f8d107ac.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (true){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "e65279cddcac522951f0ae9c9d1a9333" } } $('.js-work-strip[data-work-id=127272278]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":127272278,"title":"Analyzing the Feasibility of Achieving Machine Theory of Mind through ToMnet-like Approaches","translated_title":"","metadata":{"abstract":"This paper critically examines the efficacy of the ToMnet family of models in achieving a genuine Machine Theory of Mind (ToM). We introduce ToMnet-N, an open-source evolution of ToMnet+, developed to replicate and extend previous experiments while addressing architectural limitations. Our analysis reveals that ToMnet-N's success in predictive tasks stems primarily from pattern recognition within training data rather than true mental state understanding. 
Random-Set Neural Networks
International Conference on Learning Representations (ICLR 2025)
data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">Machine learning is increasingly deployed in safety-critical domains where erroneous predictions may lead to potentially catastrophic consequences, highlighting the need for learning systems to be aware of how confident they are in their own predictions: in other words, 'to know when they do not know'. In this paper, we propose a novel Random-Set Neural Network (RS-NN) approach to classification which predicts belief functions (rather than classical probability vectors) over the class list using the mathematics of random sets, i.e., distributions over the collection of sets of classes. RS-NN encodes the 'epistemic' uncertainty induced by training sets that are insufficiently representative or limited in size via the size of the convex set of probability vectors associated with a predicted belief function. Our approach outperforms state-of-the-art Bayesian and Ensemble methods in terms of accuracy, uncertainty estimation and out-of-distribution (OoD) detection on multiple benchmarks (CIFAR-10 vs SVHN/Intel-Image, MNIST vs FM-NIST/KMNIST, ImageNet vs ImageNet-O). RS-NN also scales up effectively to large-scale architectures (e.g. WideResNet-28-10, VGG16, Inception V3, Effi-cientNetB2 and ViT-Base-16), exhibits remarkable robustness to adversarial attacks and can provide statistical guarantees in a conformal learning setting.</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="f2d7a6d440cf96ede9262a6e19968d3e" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":121028921,"asset_id":127272251,"asset_type":"Work","button_location":"profile"}" href="https://www.academia.edu/attachments/121028921/download_file?s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="127272251"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="127272251"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 127272251; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=127272251]").text(description); $(".js-view-count[data-work-id=127272251]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 127272251; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='127272251']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); 
});</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-a9bf3a2bc8c89fa2a77156577594264ee8a0f214d74241bc0fcd3f69f8d107ac.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (true){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "f2d7a6d440cf96ede9262a6e19968d3e" } } $('.js-work-strip[data-work-id=127272251]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":127272251,"title":"Random-Set Neural Networks","translated_title":"","metadata":{"abstract":"Machine learning is increasingly deployed in safety-critical domains where erroneous predictions may lead to potentially catastrophic consequences, highlighting the need for learning systems to be aware of how confident they are in their own predictions: in other words, 'to know when they do not know'. In this paper, we propose a novel Random-Set Neural Network (RS-NN) approach to classification which predicts belief functions (rather than classical probability vectors) over the class list using the mathematics of random sets, i.e., distributions over the collection of sets of classes. RS-NN encodes the 'epistemic' uncertainty induced by training sets that are insufficiently representative or limited in size via the size of the convex set of probability vectors associated with a predicted belief function. Our approach outperforms state-of-the-art Bayesian and Ensemble methods in terms of accuracy, uncertainty estimation and out-of-distribution (OoD) detection on multiple benchmarks (CIFAR-10 vs SVHN/Intel-Image, MNIST vs FM-NIST/KMNIST, ImageNet vs ImageNet-O). RS-NN also scales up effectively to large-scale architectures (e.g. WideResNet-28-10, VGG16, Inception V3, Effi-cientNetB2 and ViT-Base-16), exhibits remarkable robustness to adversarial attacks and can provide statistical guarantees in a conformal learning setting.","ai_title_tag":"Random-Set Neural Networks for Uncertainty Estimation","publication_date":{"day":null,"month":null,"year":2025,"errors":{}},"publication_name":"2025 International Conference on Learning Representations (ICLR 2025)"},"translated_abstract":"Machine learning is increasingly deployed in safety-critical domains where erroneous predictions may lead to potentially catastrophic consequences, highlighting the need for learning systems to be aware of how confident they are in their own predictions: in other words, 'to know when they do not know'. In this paper, we propose a novel Random-Set Neural Network (RS-NN) approach to classification which predicts belief functions (rather than classical probability vectors) over the class list using the mathematics of random sets, i.e., distributions over the collection of sets of classes. RS-NN encodes the 'epistemic' uncertainty induced by training sets that are insufficiently representative or limited in size via the size of the convex set of probability vectors associated with a predicted belief function. 
Our approach outperforms state-of-the-art Bayesian and Ensemble methods in terms of accuracy, uncertainty estimation and out-of-distribution (OoD) detection on multiple benchmarks (CIFAR-10 vs SVHN/Intel-Image, MNIST vs FM-NIST/KMNIST, ImageNet vs ImageNet-O). RS-NN also scales up effectively to large-scale architectures (e.g. WideResNet-28-10, VGG16, Inception V3, Effi-cientNetB2 and ViT-Base-16), exhibits remarkable robustness to adversarial attacks and can provide statistical guarantees in a conformal learning setting.","internal_url":"https://www.academia.edu/127272251/Random_Set_Neural_Networks","translated_internal_url":"","created_at":"2025-01-26T07:35:07.569-08:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":366407,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[{"id":43025046,"work_id":127272251,"tagging_user_id":366407,"tagged_user_id":null,"co_author_invite_id":7604285,"email":"1***5@brookes.ac.uk","display_order":1,"name":"Shireen Kudukkil Manchingal","title":"Random-Set Neural Networks"},{"id":43025047,"work_id":127272251,"tagging_user_id":366407,"tagged_user_id":null,"co_author_invite_id":8053052,"email":"k***g@kuleuven.be","display_order":2,"name":"Kaizheng Wang","title":"Random-Set Neural Networks"},{"id":43025048,"work_id":127272251,"tagging_user_id":366407,"tagged_user_id":null,"co_author_invite_id":8053053,"email":"k***r@kuleuven.be","display_order":3,"name":"Keivan Shariatmadar","title":"Random-Set Neural Networks"}],"downloadable_attachments":[{"id":121028921,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/121028921/thumbnails/1.jpg","file_name":"4961_Random_Set_Neural_Network.pdf","download_url":"https://www.academia.edu/attachments/121028921/download_file","bulk_download_file_name":"Random_Set_Neural_Networks.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/121028921/4961_Random_Set_Neural_Network-libre.pdf?1737909454=\u0026response-content-disposition=attachment%3B+filename%3DRandom_Set_Neural_Networks.pdf\u0026Expires=1743344601\u0026Signature=Sr6~AaJ963qLj~ORlmdsOfzGaLR0usIk0CpPE2mbincjK~PbLE0i~tgEGerFrm1mJIM52lUfwx3nzJ8~sOQyuei8DH2XLpwXYt3HMtxdy4S-OlGpUBBIka9l11NFpiF6UeQO3Dk6Ww0fy6SBH9OO~ro9tQEG0ETaeH-OYsS6C99UBpayEGBmJLwH6uSQvC-ThXEabcZ1Rc8VNxBRUUK~TPzwk7QxWUSJ49e2Er6IN1ZVipZG4pQkoGoQlSYf7QObqqR82rfbPjI~zUfg96-0ci4QDaQrMPeJBFLnHQ-42vuj62JGWmaJpDuOxuvHGeOONZwyRln0p-v~J7DDOLAsWA__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"slug":"Random_Set_Neural_Networks","translated_slug":"","page_count":42,"language":"en","content_type":"Work","summary":"Machine learning is increasingly deployed in safety-critical domains where erroneous predictions may lead to potentially catastrophic consequences, highlighting the need for learning systems to be aware of how confident they are in their own predictions: in other words, 'to know when they do not know'. In this paper, we propose a novel Random-Set Neural Network (RS-NN) approach to classification which predicts belief functions (rather than classical probability vectors) over the class list using the mathematics of random sets, i.e., distributions over the collection of sets of classes. RS-NN encodes the 'epistemic' uncertainty induced by training sets that are insufficiently representative or limited in size via the size of the convex set of probability vectors associated with a predicted belief function. 
Our approach outperforms state-of-the-art Bayesian and Ensemble methods in terms of accuracy, uncertainty estimation and out-of-distribution (OoD) detection on multiple benchmarks (CIFAR-10 vs SVHN/Intel-Image, MNIST vs FM-NIST/KMNIST, ImageNet vs ImageNet-O). RS-NN also scales up effectively to large-scale architectures (e.g. WideResNet-28-10, VGG16, Inception V3, Effi-cientNetB2 and ViT-Base-16), exhibits remarkable robustness to adversarial attacks and can provide statistical guarantees in a conformal learning setting.","owner":{"id":366407,"first_name":"Fabio","middle_initials":null,"last_name":"Cuzzolin","page_name":"FabioCuzzolin","domain_name":"oxfordbrookes","created_at":"2011-03-17T00:43:31.195-07:00","display_name":"Fabio Cuzzolin","url":"https://oxfordbrookes.academia.edu/FabioCuzzolin"},"attachments":[{"id":121028921,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/121028921/thumbnails/1.jpg","file_name":"4961_Random_Set_Neural_Network.pdf","download_url":"https://www.academia.edu/attachments/121028921/download_file","bulk_download_file_name":"Random_Set_Neural_Networks.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/121028921/4961_Random_Set_Neural_Network-libre.pdf?1737909454=\u0026response-content-disposition=attachment%3B+filename%3DRandom_Set_Neural_Networks.pdf\u0026Expires=1743344601\u0026Signature=Sr6~AaJ963qLj~ORlmdsOfzGaLR0usIk0CpPE2mbincjK~PbLE0i~tgEGerFrm1mJIM52lUfwx3nzJ8~sOQyuei8DH2XLpwXYt3HMtxdy4S-OlGpUBBIka9l11NFpiF6UeQO3Dk6Ww0fy6SBH9OO~ro9tQEG0ETaeH-OYsS6C99UBpayEGBmJLwH6uSQvC-ThXEabcZ1Rc8VNxBRUUK~TPzwk7QxWUSJ49e2Er6IN1ZVipZG4pQkoGoQlSYf7QObqqR82rfbPjI~zUfg96-0ci4QDaQrMPeJBFLnHQ-42vuj62JGWmaJpDuOxuvHGeOONZwyRln0p-v~J7DDOLAsWA__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"research_interests":[{"id":465,"name":"Artificial Intelligence","url":"https://www.academia.edu/Documents/in/Artificial_Intelligence"},{"id":2008,"name":"Machine Learning","url":"https://www.academia.edu/Documents/in/Machine_Learning"},{"id":6404,"name":"Reasoning about Uncertainty","url":"https://www.academia.edu/Documents/in/Reasoning_about_Uncertainty"},{"id":11598,"name":"Neural Networks","url":"https://www.academia.edu/Documents/in/Neural_Networks"},{"id":15084,"name":"Statistical machine learning","url":"https://www.academia.edu/Documents/in/Statistical_machine_learning"},{"id":16097,"name":"Decision Making Under Uncertainty","url":"https://www.academia.edu/Documents/in/Decision_Making_Under_Uncertainty"},{"id":22613,"name":"Probability and statistics","url":"https://www.academia.edu/Documents/in/Probability_and_statistics"},{"id":33069,"name":"Probability","url":"https://www.academia.edu/Documents/in/Probability"},{"id":41815,"name":"Applied Probability","url":"https://www.academia.edu/Documents/in/Applied_Probability"},{"id":54123,"name":"Artificial Neural Networks","url":"https://www.academia.edu/Documents/in/Artificial_Neural_Networks"},{"id":61603,"name":"Uncertainty","url":"https://www.academia.edu/Documents/in/Uncertainty"},{"id":81182,"name":"Deep Learning","url":"https://www.academia.edu/Documents/in/Deep_Learning"},{"id":94223,"name":"Imprecise Probability","url":"https://www.academia.edu/Documents/in/Imprecise_Probability"},{"id":492766,"name":"Random Sets","url":"https://www.academia.edu/Documents/in/Random_Sets"}],"urls":[]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") if (false) { 
Aedu.setUpFigureCarousel('profile-work-127272251-figures'); } }); </script> <div class="js-work-strip profile--work_container" data-work-id="127272202"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/127272202/Credal_wrapper_of_model_averaging_for_uncertainty_estimation_in_classification"><img alt="Research paper thumbnail of Credal wrapper of model averaging for uncertainty estimation in classification" class="work-thumbnail" src="https://attachments.academia-assets.com/121028826/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/127272202/Credal_wrapper_of_model_averaging_for_uncertainty_estimation_in_classification">Credal wrapper of model averaging for uncertainty estimation in classification</a></div><div class="wp-workCard_item"><span>2025 International Conference on Learning Representations (ICLR 2025)</span><span>, 2025</span></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">This paper presents an innovative approach, called credal wrapper, to formulating a credal set re...</span><a class="js-work-more-abstract" data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">This paper presents an innovative approach, called credal wrapper, to formulating a credal set representation of model averaging for Bayesian neural networks (BNNs) and deep ensembles (DEs), capable of improving uncertainty estimation in classification tasks. Given a finite collection of single predictive distributions derived from BNNs or DEs, the proposed credal wrapper approach extracts an upper and a lower probability bound per class, acknowledging the epistemic uncertainty due to the availability of a limited amount of distributions. Such probability intervals over classes can be mapped on a convex set of probabilities (a credal set) from which, in turn, a unique prediction can be obtained using a transformation called intersection probability transformation. In this article, we conduct extensive experiments on several out-of-distribution (OOD) detection benchmarks, encompassing various dataset pairs (CIFAR10/100 vs SVHN/Tiny-ImageNet, CI-FAR10 vs CIFAR10-C, CIFAR100 vs CIFAR100-C and ImageNet vs ImageNet-O) and using different network architectures (such as VGG16, ResNet-18/50, Ef-ficientNet B2, and ViT Base). 
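A sketch of the wrapper pipeline on the weather example given in the paper's Figure 1 caption (three predictive probabilities 0.2, 0.1 and 0.7 for the rainy class, yielding the interval [0.1, 0.7]); the intersection probability below uses one standard form of the interval-to-distribution transform, which may differ in detail from the paper's eq. (5).

import numpy as np

# Three hypothetical ensemble/BNN predictive distributions over the classes
# (rainy, sunny, cloudy); the rainy column reuses the paper's example values.
preds = np.array([
    [0.2, 0.7, 0.1],
    [0.1, 0.8, 0.1],
    [0.7, 0.2, 0.1],
])

lo = preds.min(axis=0)   # lower probability bound per class
hi = preds.max(axis=0)   # upper probability bound per class

# Intersection probability: move each lower bound up by the same fraction
# beta of its interval width so that the result sums to one. (Assumes at
# least one interval has non-zero width, i.e. the members disagree.)
beta = (1.0 - lo.sum()) / (hi - lo).sum()
p_int = lo + beta * (hi - lo)

print(lo, hi)              # [0.1 0.2 0.1] [0.7 0.8 0.1]
print(p_int, p_int.sum())  # [0.4 0.5 0.1] 1.0

The min/max bounds per class are deliberately more conservative than the ensemble average: the wider the intervals, the larger the credal set and the larger the estimated epistemic uncertainty.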
Compared to the BNN and DE baselines, the proposed credal wrapper method exhibits superior performance in uncertainty estimation and achieves a lower expected calibration error on corrupted data.</span></div><div class="wp-workCard_item"><div class="carousel-container carousel-container--sm" id="profile-work-127272202-figures"><div class="prev-slide-container js-prev-button-container"><button aria-label="Previous" class="carousel-navigation-button js-profile-work-127272202-figures-prev"><span class="material-symbols-outlined" style="font-size: 24px" translate="no">arrow_back_ios</span></button></div><div class="slides-container js-slides-container"><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/12890772/figure-1-inspired-by-the-use-of-probability-intervals-for"><img alt="[Inspired by the use of probability intervals for decision-making (Yager & Kreinovich, 1999; Guo & Tanaka, 2010), we propose to build probability intervals by extracting the upper and lower bound per class from the given set of limited (categorical) probability distributions, validating this choice via extensive experiments in Section 4. E.g., consider again the task of predicting weather con- ditions (rainy, sunny, or cloudy). When receiving three probability values for the rainy condition, 2.g., 0.2, 0.1, and 0.7, using probability intervals we model the uncertainty on the probability of the rainy condition as [0.1,0.7]. Each probability interval system can determine a convex set of probabilities over the set of classes, i.e., a credal set. Such a credal set is a more natural model than individual distributions for representing the epistemic uncertainty encoded by the prediction, as it amounts to constraints on the unknown exact distribution (Hiillermeier & Waegeman, 2021; Shaker & Hiillermeier, 2021; Sale et al., 2023a). Nevertheless, a single predictive distribution, termed in- tersection probability, can still be derived from a credal set to generate a unique class prediction for classification purposes. Our credal wrapper framework is depicted in Figure 1. The remainder of this section discusses the credal wrapper generation, a method for computational complexity reduction for uncertainty estimation, and the intersection probability, in this order. Figure 1: Credal wrapper framework for a three-class (A, B, D) classification task. Given a set of individual probability distributions (denoted as single dots) in the simplex (triangle) of probability distributions of the classes, probability intervals (parallel lines) are derived by extracting the upper and lower probability bounds per class, using eq. (5). Such lower and upper probability intervals induce a credal set on {A, B, D} (P, light blue convex hull in the triangle). A single intersection probability (the red dot) is computed from the credal set using the transform in eq. (5). Uncertainty is estimated in the mathematical framework of credal sets in eq. (4). " class="figure-slide-image" src="https://figures.academia-assets.com/121028826/figure_001.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/12890777/table-15-below-the-baselines-and-poor-eu-estimation"><img alt="below the baselines) and poor EU estimation (evidenced by the lowest OOD detection values), as shown in Table 15 in the Appendix. 
Figure 2: OOD detection using EU as the metric on CIFAR10 vs CIFAR10-C of the classical and credal wrapper version of BNNs and DE, and EDD against increased corruption intensity, using VGG16 and ResNet-18 as backbones. " class="figure-slide-image" src="https://figures.academia-assets.com/121028826/figure_002.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/12890789/figure-3-ece-values-of-bnnr-bnnf-and-de-on-cifario-against"><img alt="Figure 3: ECE values of BNNR, BNNF, and DE on CIFARIO-C against increased corruption in- tensity, using the averaged probability (Prob.) and our proposed intersection probability (Prob.). VGGI16 and ResNet-18 are backbones. Results are from 15 runs. " class="figure-slide-image" src="https://figures.academia-assets.com/121028826/figure_003.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/12890802/figure-4-ood-detection-performance-of-the-classical-and"><img alt="Figure 4: OOD detection performance of the classical and credal wrapper version of DEs using EL as the metric on CIFAR10/100 vs CIFAR10-C/100-C against increased corruption intensity, using ResNet-50, EffB2, and ViT-B as backbones. " class="figure-slide-image" src="https://figures.academia-assets.com/121028826/figure_004.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/12890812/figure-5-ece-values-of-des-on-cifar-and-cifar-against"><img alt="Figure 5: ECE values of DEs on CIFAR10-C and CIFAR100-C against increased corruption in- tensity, using the averaged probability (Prob.) and our proposed intersection probability (Prob.). ResNet-50, EffB2, and ViT-B are backbones. Results are from 15 runs. Then, we construct DEs using different numbers of ensemble members, namely N =3, 5, 10, 15, 20 and 25. Each type of DEs includes 15 instances using distinct seed combinations. " class="figure-slide-image" src="https://figures.academia-assets.com/121028826/figure_005.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/12890822/figure-6-ece-values-of-des-with-various-nv-on-cifar-against"><img alt="Figure 6: ECE values of DEs with various NV on CIFAR10-C against increased corruption intensity. of samples overall can lead to a lower ECE value; (ii) Compared to the the naive averaging DE pre- dictions, our intersection probability consistently achieves lower ECE values on corrupted instances Ablation Study on Numbers of Predictive Samples in BNNs In this experiment, we first increase the sampling size of BNNs at prediction time, namely N = 10 and N =50. Table 6 reports the OOD detection performance of BNNR and BNNF involving CIFAR10 (ID) vs SVHN and Tiny-ImageNet (OODs), based on the VGG16 backbone. It shows that credal wrapper consistently produces better EU estimates, as evidenced by enhanced OOD detection performance. Further, Table 12 in the Ap- pendix reports the same comparison based on the ResNet-18 architecture, confirming those results. " class="figure-slide-image" src="https://figures.academia-assets.com/121028826/figure_006.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/12890840/figure-7-ood-detection-using-tu-as-metric-on-cifar-vs"><img alt="Figure 7: OOD detection using TU as metric on CIFAR10 vs CIFARIO-C of both the classical and credal wrapper version BNNs and DE against increased corruption intensity, using VGG16 and ResNet-18 as backbones. 
" class="figure-slide-image" src="https://figures.academia-assets.com/121028826/figure_007.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/12890847/figure-8-ood-detection-using-tu-as-the-metric-on-cifar-vs"><img alt="Figure 8: OOD detection using TU as the metric on CIFAR10/100 vs CIFAR10-C/100-C of both the classical and credal wrapper version of DEs against increased corruption intensity, using ResNet-50, EffB2, and ViT-B as backbones. " class="figure-slide-image" src="https://figures.academia-assets.com/121028826/figure_008.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/12890854/figure-9-eu-and-tu-estimates-of-id-cifar-and-ood-svhn-and"><img alt="Figure 9: EU and TU estimates of ID (CIFAR10) and OOD (SVHN and Tiny-ImageNet) samples of the classical and credal wrapper version of DEs, obtained using ResNet-50, EffB2, and ViT backbones. Results are from 15 runs. " class="figure-slide-image" src="https://figures.academia-assets.com/121028826/figure_009.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/12890869/figure-10-eu-and-tu-estimates-of-id-cifar-and-ood-svhn-and"><img alt="Figure 10: EU and TU estimates of ID (CIFAR100) and OOD (SVHN and Tiny-ImageNet) samples of the classical and credal wrapper version of DEs, obtained using ResNet-50, EffB2, and ViT backbones. Results are from 15 runs. " class="figure-slide-image" src="https://figures.academia-assets.com/121028826/figure_010.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/12890888/figure-11-in-this-section-we-further-evaluate-the"><img alt="In this section, we further evaluate the intersection probability on corrupted samples using the neg- ative log-likelihood (NLL) metric. A smaller NLL indicates that the model is more confident and accurate in predicting the correct class for each input (Dusenberry et al., 2020). Figures 11 and 12 show the consistent superiority of the intersection probability on corrupted data in extensive test cases, as evidenced by smaller NLL values. Figure 11: NLL values of BNNR, BNNF, and DE on CIFARIO-C against increased corruption intensity, using the averaged probability (Avg. Prob.) and our proposed intersection probability (Int. Prob.). VGG16 and ResNet-18 are backbones. Results are from 15 runs. " class="figure-slide-image" src="https://figures.academia-assets.com/121028826/figure_011.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/12890900/figure-12-nll-values-of-des-on-cifari-and-cifar-against"><img alt="Figure 12: NLL values of DEs on CIFAR1I0-C and CIFAR100-C against increased corruption in- tensity, using the averaged probability (Avg. Prob.) and our proposed intersection probability (Int. Prob.). ResNet-50, EffB2, and ViT-B are backbones. Results are from 15 runs. " class="figure-slide-image" src="https://figures.academia-assets.com/121028826/figure_012.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/12890914/figure-13-different-credal-set-generation-methods-left-our"><img alt="Figure 13: Different credal set generation methods. Left: our credal wrapper; right: convex hull. 
Figure 13: Different credal set generation methods. Left: our credal wrapper; right: convex hull.

The theoretical underpinning for the convex hull method when reasoning with coherent lower probabilities (and, therefore, the corresponding credal sets) is that it allows us to comply with the coherence principle (Walley, 1991). In a Bayesian context, individual predictions (such as those of networks with specified weights) can be interpreted as subjective pieces of evidence about a fact (e.g., what is the true class of an input observation). Coherence ensures that one realizes the full implications of such partial assessments (Walley, 1991; Cuzzolin, 2008). Figure 13 conceptually shows the differences between the two methods in a 2D simplex. Compared to the convex hull method, our probability interval systems exhibit a more conservative nature. Another practical difference is that the convex hull method is highly computationally complex, preventing it from being practically implemented in multi-class classification tasks. In the following, we aim to explain the associated complexity of the calculation process.

Table 1: Performance comparison between the classical and credal wrapper versions of BNN and DE, as well as EDD models. All models are implemented on VGG16/ResNet-18 backbones and trained using CIFAR10 data as ID samples. The results are from 15 runs. The best scores per metric are in bold. The results on corrupted data are averaged over all corruption types and intensities.

Table 2: OOD detection AUROC and AUPRC performance (%) of both the classical and credal wrapper versions of DEs using EU as the metric. The results are from 15 runs, based on the ResNet-50 backbone. Best scores are in bold. … datasets and model architectures due to the high computational complexity (Mukhoti et al., 2023). For instance, training a ResNet-50-based BNN on CIFAR-10 (resized to (224, 224, 3)) failed in our experiment due to exceeding the memory capacity of a single Nvidia A100 GPU. The dataset pairs (ID vs OOD) considered include CIFAR10/CIFAR100 (Krizhevsky, 2012) vs SVHN/Tiny-ImageNet, ImageNet (Deng et al., 2009) vs ImageNet-O (Hendrycks et al., 2021), CIFAR10 vs CIFAR10-C, and CIFAR100 vs CIFAR100-C (Hendrycks & Dietterich, 2019). DEs are implemented on the well-established ResNet-50 (He et al., 2016). All input data have a shape of (224, 224, 3). More training details are given in Appendix §B. The PIA algorithm (Algorithm 1) is applied using the settings J = 20 and J = 50 to calculate the generalized entropy (its lower and upper bounds over the credal set) on dataset pairs involving CIFAR100 and ImageNet, respectively. Compared to classical DEs, our credal wrapper demonstrates enhanced OOD detection across a spectrum of data pairs, as shown in Table 2, suggesting that our proposed method can consistently improve EU estimation.
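To make the complexity contrast around Figure 13 concrete, here is a small hypothetical sketch: the convex-hull credal set requires enumerating hull vertices of the sampled predictions (which Qhull can only do after projecting off the simplex constraint, and whose cost grows quickly with the number of classes), while the interval-based wrapper needs only per-class minima and maxima.

```python
import numpy as np
from scipy.spatial import ConvexHull

rng = np.random.default_rng(0)
C, N = 5, 50                       # classes, ensemble predictions
probs = rng.dirichlet(np.ones(C), size=N)

# Convex-hull credal set: vertices of the hull of the N predictions.
# The points lie on the simplex (a (C-1)-dim affine subspace), so drop
# one coordinate before calling Qhull.
hull = ConvexHull(probs[:, :-1])
print("hull vertices:", len(hull.vertices))

# Interval-based credal set (the wrapper): just 2*C numbers.
lower, upper = probs.min(axis=0), probs.max(axis=0)
print("interval bounds:", np.round(lower, 3), np.round(upper, 3))
```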
" class="figure-slide-image" src="https://figures.academia-assets.com/121028826/table_002.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/12890944/table-4-ood-detection-performance-comparison-in-des"><img alt="Table 4: OOD detection performance (%) comparison in DEs. " class="figure-slide-image" src="https://figures.academia-assets.com/121028826/table_003.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/12890955/table-6-ood-detection-comparisons-using-eu-of-vgg-based-bnns"><img alt="Table 6: OOD detection comparisons using EU (%) of VGG16-based BNNs. " class="figure-slide-image" src="https://figures.academia-assets.com/121028826/table_004.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/12890964/table-7-ood-detection-auroc-and-auprc-performance-comparison"><img alt="Table 7: OOD detection AUROC and AUPRC performance (%) comparison between classical and credal wrapper of BNNs and DEs using EU. The results are from 15 runs. Experimental Validation In this ablation study, we evaluate on EU estimation quality of our credal wrapper using GH(P) measure. Table 7 reports OOD detection performance tested on CIFARIO (ID) vs SVHN (OOD) and Tiny-ImageNet (OOD). All models are implemented on the VGG16 backbones. The results demonstrate that our credal wrapper consistently enhances EU estimation performance and is agnostic in the sense that it can accommodate any EU measure for credal sets. " class="figure-slide-image" src="https://figures.academia-assets.com/121028826/table_005.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/12890974/table-8-ood-detection-auroc-and-auprc-performance-comparison"><img alt="Table 8: OOD detection AUROC and AUPRC performance (%) comparison between the classical and credal wrapper version of BNNs and DEs, using TU as the uncertainty metric. All models are implemented on VGG16/ResNet-18 backbones and tested on CIFAR10 (ID) vs SVHN (OOD) and Tiny-ImageNet (OOD). The results are from 15 runs. The best scores are in bold. Table 9: OOD detection AUROC and AUPRC performance (%) of both the classical and credal wrapper version of DEs using TU as the metric. The results are from 15 runs, based on the ResNet- 30 backbone. The best scores are in bold. " class="figure-slide-image" src="https://figures.academia-assets.com/121028826/table_006.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/12890987/table-7-credal-wrapper-of-model-averaging-for-uncertainty"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/121028826/table_007.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/12890995/table-10-ood-detection-auroc-and-auprc-performance-of-both"><img alt="Table 10: OOD detection AUROC and AUPRC performance (%) of both the classical and credal wrapper version of DEs using TU as the metric. Results are from 15 runs, based on EffB2 and ViT-B backbones. The best scores are in bold. \.3. 
ABLATION STUDY ON OVERCONFIDENCE REGIME

Table 11: Ablation study on numbers of predictive samples in DEs: OOD detection AUROC and AUPRC performance (%) of both the classical and credal wrapper versions of DEs using TU as the uncertainty metric, involving CIFAR10 (ID) vs SVHN (OOD) and Tiny-ImageNet (OOD). The results are from 15 runs. The best scores are in bold.

Table 12: Ablation study on numbers of predictive samples in BNNs: OOD detection AUROC and AUPRC performance (%) of both the classical and credal wrapper versions of BNNs with an increased number of samples, involving CIFAR10 (ID) vs SVHN (OOD) and Tiny-ImageNet (OOD). The results are from 15 runs and the best scores per uncertainty metric are in bold. In the case of 3 classes, if there are three distinct extreme probability vectors (three vertices of the simplex), our credal wrapper method will effectively convey complete uncertainty, with the resulting credal set encompassing the entire simplex. This conservative nature can be sensible, as it expresses our full ignorance of the correct classification.

Table 13: OOD detection using EU (left) and TU (right) as uncertainty metrics in overconfident scenarios. The results are from 15 runs, based on the ResNet-50 backbone. The best scores per uncertainty metric are in bold.

Table 14: OOD detection AUROC and AUPRC performance (%) of the credal wrapper of DEs using EU (left) and TU (right) as uncertainty metrics, and the time cost, using different settings of J in the PIA algorithm. The OOD detection involves CIFAR100 (ID) vs SVHN (OOD) and Tiny-ImageNet (OOD). The results are from 15 runs, based on the ResNet-50 backbone.

Table 15: Poor ID prediction and OOD detection performance of EDD-Fair, with CIFAR10 as ID data.

OOD Detection Process. In this paper, the OOD detection process is treated as a binary classification. We label ID and OOD samples as 0 and 1, respectively. The model's uncertainty estimation (using the EU or TU) for each sample is the 'prediction' for the detection. In terms of performance indicators, the applied AUROC quantifies the rates of true and false positives.
The AUPRC evaluates precision and recall trade-offs, providing valuable insights into the model's effectiveness across different confidence levels.
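The detection setup just described maps directly onto standard binary-classification metrics; a minimal sketch with synthetic placeholder uncertainty scores (the score arrays below are illustrative, not the paper's outputs):

```python
import numpy as np
from sklearn.metrics import average_precision_score, roc_auc_score

rng = np.random.default_rng(0)
# Placeholder per-sample uncertainty estimates (EU or TU): OOD samples
# are labelled 1 and should receive higher uncertainty than ID samples (0).
u_id = rng.uniform(0.0, 0.5, size=1000)
u_ood = rng.uniform(0.3, 1.0, size=1000)

scores = np.concatenate([u_id, u_ood])
labels = np.concatenate([np.zeros(1000), np.ones(1000)])

print("AUROC:", roc_auc_score(labels, scores))            # TPR/FPR trade-off
print("AUPRC:", average_precision_score(labels, scores))  # precision/recall
```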
Networks","url":"https://www.academia.edu/Documents/in/Artificial_Neural_Networks"},{"id":81182,"name":"Deep Learning","url":"https://www.academia.edu/Documents/in/Deep_Learning"},{"id":177212,"name":"Interval analysis","url":"https://www.academia.edu/Documents/in/Interval_analysis"},{"id":1587858,"name":"Confidence Interval","url":"https://www.academia.edu/Documents/in/Confidence_Interval"}],"urls":[]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") if (true) { Aedu.setUpFigureCarousel('profile-work-127272202-figures'); } }); </script> <div class="js-work-strip profile--work_container" data-work-id="127272113"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/127272113/A_Unified_Evaluation_Framework_for_Epistemic_Predictions_Anonymous_Author_Anonymous_Institution"><img alt="Research paper thumbnail of A Unified Evaluation Framework for Epistemic Predictions Anonymous Author Anonymous Institution" class="work-thumbnail" src="https://attachments.academia-assets.com/121028812/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/127272113/A_Unified_Evaluation_Framework_for_Epistemic_Predictions_Anonymous_Author_Anonymous_Institution">A Unified Evaluation Framework for Epistemic Predictions Anonymous Author Anonymous Institution</a></div><div class="wp-workCard_item"><span>Artificial Intelligence and Statistics (AISTATS 2025)</span><span>, 2025</span></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">Predictions of uncertainty-aware models are diverse, ranging from single point estimates (often a...</span><a class="js-work-more-abstract" data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">Predictions of uncertainty-aware models are diverse, ranging from single point estimates (often averaged over prediction samples) to predictive distributions, to set-valued or credal-set representations. We propose a novel unified evaluation framework for uncertaintyaware classifiers, applicable to a wide range of model classes, which allows users to tailor the trade-off between accuracy and precision of predictions via a suitably designed performance metric. This makes possible the selection of the most suitable model for a particular real-world application as a function of the desired trade-off. 
Our experiments, concerning Bayesian, ensemble, evidential, deterministic, credal and belief function classifiers on the CIFAR-10, MNIST and CIFAR-100 datasets, show that the metric behaves as desired.

Figures and tables:

Table 9: Model selection based on the evaluation metric using KL divergence and non-specificity on the CIFAR-100 dataset.

Figure 1: Different types of uncertainty-aware model predictions, shown in a unit simplex of probability distributions defined on the list of classes Y = {a, b, c}. Our proposed evaluation framework uses a metric which combines, for each input x, a distance (arrows) between the corresponding ground truth (e.g., (0,1,0)) and the epistemic predictions generated by the various models (in the form of credal sets), and a measure of the extent of the credal prediction (non-specificity).

Figure 2: Measures of KL divergence (top left), non-specificity (top right), and the evaluation metric (bottom left) for both correctly (CC) and incorrectly classified (ICC) samples from CIFAR-10, and evaluation metric vs trade-off parameter (bottom right), for all models, on the CIFAR-10 dataset.

Figure 3: Visualizations of 100 prediction samples obtained prior to Bayesian Model Averaging and the corresponding Bayesian-Model-Averaged prediction in two real scenarios from CIFAR-10. Here p(y|x) ≈ (1/|Θ|) Σ_{θi∈Θ} Φ_{θi}(x), where Θ is the set of sampled weights, Φ_{θi}(x) is the prediction made by the model with weights θi for input x, and Φ is the function for the model. This process is called Bayesian Model Averaging (BMA). Fig. 3 illustrates two contrasting scenarios in which BMA proves advantageous in the first case (top), yet exhibits limitations as it discards all information in the second case (bottom). This can limit a BNN's ability to accurately represent complex uncertainty patterns, potentially undermining its effectiveness in scenarios requiring reliable uncertainty quantification.
BMA may inadvertently smooth out predictive distributions, diluting the inherent uncertainty present in individual models (Hinne et al., 2020; Graefe et al., 2015), as shown in Fig. 3. When applied to classification, BMA yields point-wise predictions. For a fair comparison, and to overcome BMA's limitations, in this paper we also use the sets of prediction samples obtained from the different posterior weights before averaging. (A minimal sketch of this averaging step follows the captions below.)

Figure 4: Visualizations of belief and mass predictions on the power-set space and its mapping to the label space Y using pignistic probabilities on the CIFAR-10 dataset.

Figure 5: Probability simplices illustrating the convex closure of predictions and credal sets for the Bayesian model (LB-BNN) across three classes of the CIFAR-10 dataset.

Figure 6: Comparison of (a) Kullback-Leibler (KL) divergence and (b) Jensen-Shannon (JS) divergence for correctly classified (CC) and incorrectly classified (ICC) samples from the CIFAR-10 dataset, for all models considered here. Notably, the scales of these two measures differ significantly (Y-axis).

Figure 7: Comparison of the mean evaluation metric ε using mean Kullback-Leibler (KL) divergence (top) and mean Jensen-Shannon (JS) divergence (bottom) for correct (left) and incorrect (right) predictions of the CIFAR-10 dataset.

Figure 8: Scatter plots showing the relationship between uncertainty (KL and JS divergences) and non-specificity for correct and incorrect predictions across models.

Figure 9: Comparison of (a) non-specificity (NS) and (b) credal uncertainty (CU) for correctly classified (CC) and incorrectly classified (ICC) samples from the CIFAR-10 dataset, for all models considered here. Notably, the scales of these two measures differ (Y-axis). These optimization problems can be addressed using standard solvers, such as the SciPy optimization package (Virtanen et al., 2020).
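As promised above, here is a minimal sketch of the BMA step discussed for Figure 3; the Dirichlet-sampled predictions are placeholders, not the paper's model outputs.

```python
import numpy as np

def bayesian_model_average(prediction_samples):
    """Average the predictive distributions produced by models with
    different sampled weights into a single point prediction: the BMA
    step of Figure 3, which discards the spread across samples."""
    return np.asarray(prediction_samples).mean(axis=0)

rng = np.random.default_rng(0)
# 100 sampled predictive distributions over 3 classes (placeholders).
samples = rng.dirichlet(np.ones(3), size=100)

print(bayesian_model_average(samples))     # point-wise BMA prediction
# A set-valued treatment keeps the per-class spread instead:
print(samples.min(axis=0), samples.max(axis=0))
```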
" class="figure-slide-image" src="https://figures.academia-assets.com/121028812/figure_009.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/7548461/figure-10-comparison-of-mean-evaluation-metric-using-mean"><img alt="Figure 10: Comparison of mean Evaluation Metric € using mean Non-Specificity (top) and mean Credal Uncertainty (bottom) for Correct (left) and Incorrect (right) predictions of the CIFAR-10 dataset. " class="figure-slide-image" src="https://figures.academia-assets.com/121028812/figure_010.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/7548465/figure-11-unified-evaluation-framework-for-epistemic"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/121028812/figure_011.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/7548470/figure-12-unified-evaluation-framework-for-epistemic"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/121028812/figure_012.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/7548477/figure-13-ablation-study-on-the-number-of-prediction-samples"><img alt="Figure 13: Ablation study on the number of prediction samples of LB-BNN and the number of ensembles of DE with Evaluation Metric (€). increases, with a more pronounced effect seen in DE compared to LB-BNN. Results are shown for LB-BNN wit] 50 to 500 samples and DE with 5 to 30 ensembles. The increase in the number of samples leads to a correspondin; increase in the size of the credal set, without apparently being compensated by lower KL divergence values. " class="figure-slide-image" src="https://figures.academia-assets.com/121028812/figure_013.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/7548481/figure-14-credal-set-sizes-for-all-models-for-prediction"><img alt="Figure 14: Credal set sizes for all models for 50 prediction samples of the CIFAR-10 dataset. Larger credal set sizes indicate a more imprecise prediction. " class="figure-slide-image" src="https://figures.academia-assets.com/121028812/figure_014.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/7548490/figure-15-credal-set-sizes-for-all-models-for-prediction"><img alt="Figure 15: Credal set sizes for all models for 50 prediction samples of the MNIST dataset. Larger credal set sizes indicate a more imprecise prediction " class="figure-slide-image" src="https://figures.academia-assets.com/121028812/figure_015.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/7548497/figure-16-credal-set-sizes-for-all-models-for-prediction"><img alt="Figure 16: Credal set sizes for all models for 50 prediction samples of the CIFAR-100 dataset. Larger credal set sizes indicate a more imprecise prediction " class="figure-slide-image" src="https://figures.academia-assets.com/121028812/figure_016.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/7548554/figure-17-credal-set-size-vs-non-specificity-heatmap-for-lb"><img alt="Figure 17: Credal Set Size vs. Non-Specificity heatmap for (a) LB-BNN, (b) Deep Ensembles (DE), (c) EDL, (d) CreINN, (e) E-CNN and (f) RS-NN on the CIFAR-10 dataset. Credal Set Size and Non-Specificity are directly correlated to each other. 
Log frequency is used to better showcase the trend.

Figure 18: Credal set size vs. non-specificity heatmap for (a) LB-BNN, (b) Deep Ensembles (DE), (c) EDL, (d) CreINN, (e) E-CNN and (f) RS-NN on the MNIST dataset. Credal set size and non-specificity are directly correlated with each other. Log frequency is used to better showcase the trend.

Figure 19: Credal set size vs. non-specificity heatmap for (a) LB-BNN, (b) Deep Ensembles (DE), (c) EDL, (d) CreINN and (e) RS-NN on the CIFAR-100 dataset. Credal set size and non-specificity are directly correlated with each other. Log frequency is used to better showcase the trend.

Table 1: Comparison of Kullback-Leibler divergence (KL), non-specificity (NS) and the evaluation metric (ε) for uncertainty-aware classifiers (trade-off λ = 1). Mean and std are shown for CIFAR-10, MNIST and CIFAR-100.

Table 3: Comparison of KL, non-specificity, and the evaluation metric (ε) calculated using approximated versus naive credal set vertices for LB-BNN and DE on the CIFAR-10 dataset.

Table 4: Model rankings based on KL and JS divergence on the CIFAR-10 dataset. Model selection is based on the mean of the evaluation metric (ε), with the models with the lowest ε ranking first.

Table 5: Model rankings based on non-specificity (NS) and credal uncertainty (CU) on the CIFAR-10 dataset. Model selection is based on the mean of the evaluation metric (ε), with the models with the lowest ε ranking first. The distance metric used here is KL divergence.
" class="figure-slide-image" src="https://figures.academia-assets.com/121028812/table_004.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/7548658/table-6-comparison-of-kl-divergence-kl-non-specificity-ns"><img alt="Table 6: Comparison of KL divergence (KL), Non-Specificity (NS), and Evaluation Metric (€) (trade-off A = 1) for Correctly Classified (CC) and Incorrectly Classified (ICC) samples for each model on three datasets: CIFAR-10. MNIST, and CIFAR-100. " class="figure-slide-image" src="https://figures.academia-assets.com/121028812/table_005.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/7548665/table-8-model-selection-based-on-evaluation-metric-using-kl"><img alt="Table 8: Model Selection Based on Evaluation Metric using KL Divergence and Non-Specificity on the MNIST dataset. Table 7: Trade-off (A) vs Evaluation Metric (€) for different values of \ for the CIFAR-10 dataset. " class="figure-slide-image" src="https://figures.academia-assets.com/121028812/table_006.jpg" /></a></figure></div><div class="next-slide-container js-next-button-container"><button aria-label="Next" class="carousel-navigation-button js-profile-work-127272113-figures-next"><span class="material-symbols-outlined" style="font-size: 24px" translate="no">arrow_forward_ios</span></button></div></div></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="afd88cd36c4999237bf730ba5e1ee813" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":121028812,"asset_id":127272113,"asset_type":"Work","button_location":"profile"}" href="https://www.academia.edu/attachments/121028812/download_file?s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="127272113"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="127272113"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 127272113; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=127272113]").text(description); $(".js-view-count[data-work-id=127272113]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 127272113; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='127272113']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 
})(["https://a.academia-assets.com/assets/wow_profile-a9bf3a2bc8c89fa2a77156577594264ee8a0f214d74241bc0fcd3f69f8d107ac.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (true){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "afd88cd36c4999237bf730ba5e1ee813" } } $('.js-work-strip[data-work-id=127272113]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":127272113,"title":"A Unified Evaluation Framework for Epistemic Predictions Anonymous Author Anonymous Institution","translated_title":"","metadata":{"abstract":"Predictions of uncertainty-aware models are diverse, ranging from single point estimates (often averaged over prediction samples) to predictive distributions, to set-valued or credal-set representations. We propose a novel unified evaluation framework for uncertaintyaware classifiers, applicable to a wide range of model classes, which allows users to tailor the trade-off between accuracy and precision of predictions via a suitably designed performance metric. This makes possible the selection of the most suitable model for a particular real-world application as a function of the desired trade-off. Our experiments, concerning Bayesian, ensemble, evidential, deterministic, credal and belief function classifiers on the CIFAR-10, MNIST and CIFAR-100 datasets, show that the metric behaves as desired.","publication_date":{"day":null,"month":null,"year":2025,"errors":{}},"publication_name":"Artificial Intelligence and Statistics (AISTATS 2025)"},"translated_abstract":"Predictions of uncertainty-aware models are diverse, ranging from single point estimates (often averaged over prediction samples) to predictive distributions, to set-valued or credal-set representations. We propose a novel unified evaluation framework for uncertaintyaware classifiers, applicable to a wide range of model classes, which allows users to tailor the trade-off between accuracy and precision of predictions via a suitably designed performance metric. This makes possible the selection of the most suitable model for a particular real-world application as a function of the desired trade-off. 
Our experiments, concerning Bayesian, ensemble, evidential, deterministic, credal and belief function classifiers on the CIFAR-10, MNIST and CIFAR-100 datasets, show that the metric behaves as desired.","internal_url":"https://www.academia.edu/127272113/A_Unified_Evaluation_Framework_for_Epistemic_Predictions_Anonymous_Author_Anonymous_Institution","translated_internal_url":"","created_at":"2025-01-26T07:28:24.618-08:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":366407,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[{"id":43025003,"work_id":127272113,"tagging_user_id":366407,"tagged_user_id":null,"co_author_invite_id":7604285,"email":"1***5@brookes.ac.uk","display_order":1,"name":"Shireen Kudukkil Manchingal","title":"A Unified Evaluation Framework for Epistemic Predictions Anonymous Author Anonymous Institution"},{"id":43025004,"work_id":127272113,"tagging_user_id":366407,"tagged_user_id":null,"co_author_invite_id":8053052,"email":"k***g@kuleuven.be","display_order":2,"name":"Kaizheng Wang","title":"A Unified Evaluation Framework for Epistemic Predictions Anonymous Author Anonymous Institution"}],"downloadable_attachments":[{"id":121028812,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/121028812/thumbnails/1.jpg","file_name":"AISTATS25_Submitted_version.pdf","download_url":"https://www.academia.edu/attachments/121028812/download_file","bulk_download_file_name":"A_Unified_Evaluation_Framework_for_Epist.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/121028812/AISTATS25_Submitted_version.pdf?1737905300=\u0026response-content-disposition=attachment%3B+filename%3DA_Unified_Evaluation_Framework_for_Epist.pdf\u0026Expires=1743344601\u0026Signature=Pb~ZBWxfydOat8BeJTxXuT2GOlETd7Gx7RPonnQgRu14VNGHFuR9r-aKKFTAaphHvl1dqfRu-vBzQtD911200jVyaeZBJZR48mBtTlC0dm8HDJX928XoFidK0smjLn-ibVbGVYA78lQNDKHabHBgikCi5SO8ZKk3M~MM5N1Tho7IUIYOzFSWOo2xUG~mXcOzWRl4RgadOv1GKyDPXBJRKpH7Pvl7CsTTrmiyRpQpi16YsZkOzypR5Dch7r0AV0xGrTZlv4rUtjobq3cnv67wzIeYEybhU0ky1FHgRQyc1PnBE5qbbB3iIbfvgy2AzuvSEXdLZLa7zpFmuxd6arX71w__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"slug":"A_Unified_Evaluation_Framework_for_Epistemic_Predictions_Anonymous_Author_Anonymous_Institution","translated_slug":"","page_count":33,"language":"en","content_type":"Work","summary":"Predictions of uncertainty-aware models are diverse, ranging from single point estimates (often averaged over prediction samples) to predictive distributions, to set-valued or credal-set representations. We propose a novel unified evaluation framework for uncertaintyaware classifiers, applicable to a wide range of model classes, which allows users to tailor the trade-off between accuracy and precision of predictions via a suitably designed performance metric. This makes possible the selection of the most suitable model for a particular real-world application as a function of the desired trade-off. 
Our experiments, concerning Bayesian, ensemble, evidential, deterministic, credal and belief function classifiers on the CIFAR-10, MNIST and CIFAR-100 datasets, show that the metric behaves as desired.","owner":{"id":366407,"first_name":"Fabio","middle_initials":null,"last_name":"Cuzzolin","page_name":"FabioCuzzolin","domain_name":"oxfordbrookes","created_at":"2011-03-17T00:43:31.195-07:00","display_name":"Fabio Cuzzolin","url":"https://oxfordbrookes.academia.edu/FabioCuzzolin"},"attachments":[{"id":121028812,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/121028812/thumbnails/1.jpg","file_name":"AISTATS25_Submitted_version.pdf","download_url":"https://www.academia.edu/attachments/121028812/download_file","bulk_download_file_name":"A_Unified_Evaluation_Framework_for_Epist.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/121028812/AISTATS25_Submitted_version.pdf?1737905300=\u0026response-content-disposition=attachment%3B+filename%3DA_Unified_Evaluation_Framework_for_Epist.pdf\u0026Expires=1743344601\u0026Signature=Pb~ZBWxfydOat8BeJTxXuT2GOlETd7Gx7RPonnQgRu14VNGHFuR9r-aKKFTAaphHvl1dqfRu-vBzQtD911200jVyaeZBJZR48mBtTlC0dm8HDJX928XoFidK0smjLn-ibVbGVYA78lQNDKHabHBgikCi5SO8ZKk3M~MM5N1Tho7IUIYOzFSWOo2xUG~mXcOzWRl4RgadOv1GKyDPXBJRKpH7Pvl7CsTTrmiyRpQpi16YsZkOzypR5Dch7r0AV0xGrTZlv4rUtjobq3cnv67wzIeYEybhU0ky1FHgRQyc1PnBE5qbbB3iIbfvgy2AzuvSEXdLZLa7zpFmuxd6arX71w__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"research_interests":[{"id":465,"name":"Artificial Intelligence","url":"https://www.academia.edu/Documents/in/Artificial_Intelligence"},{"id":2008,"name":"Machine Learning","url":"https://www.academia.edu/Documents/in/Machine_Learning"},{"id":6404,"name":"Reasoning about Uncertainty","url":"https://www.academia.edu/Documents/in/Reasoning_about_Uncertainty"},{"id":11598,"name":"Neural Networks","url":"https://www.academia.edu/Documents/in/Neural_Networks"},{"id":16097,"name":"Decision Making Under Uncertainty","url":"https://www.academia.edu/Documents/in/Decision_Making_Under_Uncertainty"},{"id":28512,"name":"Bayesian Networks","url":"https://www.academia.edu/Documents/in/Bayesian_Networks"},{"id":31477,"name":"Uncertainty Quantification","url":"https://www.academia.edu/Documents/in/Uncertainty_Quantification"},{"id":41239,"name":"Bayesian statistics \u0026 modelling","url":"https://www.academia.edu/Documents/in/Bayesian_statistics_and_modelling"},{"id":54123,"name":"Artificial Neural Networks","url":"https://www.academia.edu/Documents/in/Artificial_Neural_Networks"},{"id":61603,"name":"Uncertainty","url":"https://www.academia.edu/Documents/in/Uncertainty"},{"id":81182,"name":"Deep Learning","url":"https://www.academia.edu/Documents/in/Deep_Learning"}],"urls":[]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") if (true) { Aedu.setUpFigureCarousel('profile-work-127272113-figures'); } }); </script> <div class="js-work-strip profile--work_container" data-work-id="124827501"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/124827501/Neuroscience_for_AI_The_importance_of_Theory_of_Mind"><img alt="Research paper thumbnail of Neuroscience for AI: The importance of Theory of Mind" class="work-thumbnail" src="https://attachments.academia-assets.com/118981507/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div 
class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/124827501/Neuroscience_for_AI_The_importance_of_Theory_of_Mind">Neuroscience for AI: The importance of Theory of Mind</a></div><div class="wp-workCard_item"><span>Developments in Neuroethics and Bioethics</span><span>, 2024</span></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">Understanding Theory of Mind is challenging as it can be viewed as a complex holistic process tha...</span><a class="js-work-more-abstract" data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">Understanding Theory of Mind is challenging as it can be viewed as a complex holistic process that can be decomposed into a number of hot and cold cognitive processes. Cold cognitive processes are non-emotional, whereas hot cognition is both social and emotional. Cold cognition includes working memory, cognitive flexibility and 'if-then' inferential logic and planning, processes which are used in non-social contexts, but which are often components of Theory of Mind tests. In social situations, we use our social cognition to process, remember and use information to explain and predict other people's behaviour, as well as our own. Therefore, strategic behaviour for goal achievement involving other people often relies on an interaction between hot and cold cognition. Similarly, for goal achievement in artificial intelligence (AI), for example robust performance in autonomous cars, or therapeutic interactions with humans, it is important to not only have the cold cognitive processes, which are well established in AI, but also the hot cognitive processes that require further development. This chapter will address hot cognitive processes, their underlying neural networks and how this information might be integrated in AI models to more successfully mimic the human brain and to enhance AI-human interactions. 
Finally, the importance of an integrated and interdisciplinary approach to AI models and the increasingly arising ethical issues in AI are discussed.</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="289e5480205b5c4d3dcc29443ceef64b" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":118981507,"asset_id":124827501,"asset_type":"Work","button_location":"profile"}" href="https://www.academia.edu/attachments/118981507/download_file?s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="124827501"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="124827501"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 124827501; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=124827501]").text(description); $(".js-view-count[data-work-id=124827501]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 124827501; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='124827501']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-a9bf3a2bc8c89fa2a77156577594264ee8a0f214d74241bc0fcd3f69f8d107ac.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (true){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "289e5480205b5c4d3dcc29443ceef64b" } } $('.js-work-strip[data-work-id=124827501]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":124827501,"title":"Neuroscience for AI: The importance of Theory of Mind","translated_title":"","metadata":{"doi":"10.1016/bs.dnb.2024.03.001","volume":"7","abstract":"Understanding Theory of Mind is challenging as it can be viewed as a complex holistic process that can be decomposed into a number of hot and cold cognitive processes. Cold cognitive processes are non-emotional, whereas hot cognition is both social and emotional. Cold cognition includes working memory, cognitive flexibility and 'if-then' inferential logic and planning, processes which are used in non-social contexts, but which are often components of Theory of Mind tests. 
In social situations, we use our social cognition to process, remember and use information to explain and predict other people's behaviour, as well as our own. Therefore, strategic behaviour for goal achievement involving other people often relies on an interaction between hot and cold cognition. Similarly, for goal achievement in artificial intelligence (AI), for example robust performance in autonomous cars, or therapeutic interactions with humans, it is important to not only have the cold cognitive processes, which are well established in AI, but also the hot cognitive processes that require further development. This chapter will address hot cognitive processes, their underlying neural networks and how this information might be integrated in AI models to more successfully mimic the human brain and to enhance AI-human interactions. Finally, the importance of an integrated and interdisciplinary approach to AI models and the increasingly arising ethical issues in AI are discussed.","ai_title_tag":"Integrating Theory of Mind in AI Development","page_numbers":"65-83","publication_date":{"day":null,"month":null,"year":2024,"errors":{}},"publication_name":"Developments in Neuroethics and Bioethics"},"translated_abstract":"Understanding Theory of Mind is challenging as it can be viewed as a complex holistic process that can be decomposed into a number of hot and cold cognitive processes. Cold cognitive processes are non-emotional, whereas hot cognition is both social and emotional. Cold cognition includes working memory, cognitive flexibility and 'if-then' inferential logic and planning, processes which are used in non-social contexts, but which are often components of Theory of Mind tests. In social situations, we use our social cognition to process, remember and use information to explain and predict other people's behaviour, as well as our own. Therefore, strategic behaviour for goal achievement involving other people often relies on an interaction between hot and cold cognition. Similarly, for goal achievement in artificial intelligence (AI), for example robust performance in autonomous cars, or therapeutic interactions with humans, it is important to not only have the cold cognitive processes, which are well established in AI, but also the hot cognitive processes that require further development. This chapter will address hot cognitive processes, their underlying neural networks and how this information might be integrated in AI models to more successfully mimic the human brain and to enhance AI-human interactions. 
Finally, the importance of an integrated and interdisciplinary approach to AI models and the increasingly arising ethical issues in AI are discussed.","internal_url":"https://www.academia.edu/124827501/Neuroscience_for_AI_The_importance_of_Theory_of_Mind","translated_internal_url":"","created_at":"2024-10-18T01:14:39.543-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":366407,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[{"id":42565046,"work_id":124827501,"tagging_user_id":366407,"tagged_user_id":null,"co_author_invite_id":5334584,"email":"f***n@inrialpes.fr","display_order":1,"name":"Fabio Cuzzolin","title":"Neuroscience for AI: The importance of Theory of Mind"},{"id":42565047,"work_id":124827501,"tagging_user_id":366407,"tagged_user_id":33434344,"co_author_invite_id":null,"email":"b***1@cam.ac.uk","display_order":2,"name":"Barbara Sahakian","title":"Neuroscience for AI: The importance of Theory of Mind"},{"id":42565048,"work_id":124827501,"tagging_user_id":366407,"tagged_user_id":107598522,"co_author_invite_id":null,"email":"l***e@gmail.com","display_order":3,"name":"Christelle Langley","title":"Neuroscience for AI: The importance of Theory of Mind"}],"downloadable_attachments":[{"id":118981507,"title":"","file_type":"docx","scribd_thumbnail_url":"https://attachments.academia-assets.com/118981507/thumbnails/1.jpg","file_name":"Neuroscience_for_AI_Chapter_Revisions_30.10.2023.docx","download_url":"https://www.academia.edu/attachments/118981507/download_file","bulk_download_file_name":"Neuroscience_for_AI_The_importance_of_Th.docx","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/118981507/Neuroscience_for_AI_Chapter_Revisions_30.10.2023.docx?1729239274=\u0026response-content-disposition=attachment%3B+filename%3DNeuroscience_for_AI_The_importance_of_Th.docx\u0026Expires=1743344601\u0026Signature=HY2HsROAM8mUbM~NRsA-ChddgCdlDQxlduYLBk48d80ZtqdE13QqCngccQfraX-fc9VFNgf7A1P5rV5r9jenopSgT8CXYH-HtYP2S0Va-N8TPwBRJ6A03-zkpRzsR0xjFm3TokMYf8ZPrFynNoXurdSy-iAc2GlESRWxVtKKNPCHLHVlL0FhR1y1lXKYMrAB1x8zW-2LYJW6uxqdBnyesmLC412amjamQZvuXPRNAJDVhIlJdzsdw9bSO28B3aBmBh6a~cNirNYt89kBWAgOEDhZR0rTGFom873C17FxN846wNNWpc3~NHGNX9ZGxQDQqsg0G5BzVec05-1I4gbupA__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"slug":"Neuroscience_for_AI_The_importance_of_Theory_of_Mind","translated_slug":"","page_count":14,"language":"en","content_type":"Work","summary":"Understanding Theory of Mind is challenging as it can be viewed as a complex holistic process that can be decomposed into a number of hot and cold cognitive processes. Cold cognitive processes are non-emotional, whereas hot cognition is both social and emotional. Cold cognition includes working memory, cognitive flexibility and 'if-then' inferential logic and planning, processes which are used in non-social contexts, but which are often components of Theory of Mind tests. In social situations, we use our social cognition to process, remember and use information to explain and predict other people's behaviour, as well as our own. Therefore, strategic behaviour for goal achievement involving other people often relies on an interaction between hot and cold cognition. Similarly, for goal achievement in artificial intelligence (AI), for example robust performance in autonomous cars, or therapeutic interactions with humans, it is important to not only have the cold cognitive processes, which are well established in AI, but also the hot cognitive processes that require further development. 
This chapter will address hot cognitive processes, their underlying neural networks and how this information might be integrated in AI models to more successfully mimic the human brain and to enhance AI-human interactions. Finally, the importance of an integrated and interdisciplinary approach to AI models and the increasingly arising ethical issues in AI are discussed.","owner":{"id":366407,"first_name":"Fabio","middle_initials":null,"last_name":"Cuzzolin","page_name":"FabioCuzzolin","domain_name":"oxfordbrookes","created_at":"2011-03-17T00:43:31.195-07:00","display_name":"Fabio Cuzzolin","url":"https://oxfordbrookes.academia.edu/FabioCuzzolin"},"attachments":[{"id":118981507,"title":"","file_type":"docx","scribd_thumbnail_url":"https://attachments.academia-assets.com/118981507/thumbnails/1.jpg","file_name":"Neuroscience_for_AI_Chapter_Revisions_30.10.2023.docx","download_url":"https://www.academia.edu/attachments/118981507/download_file","bulk_download_file_name":"Neuroscience_for_AI_The_importance_of_Th.docx","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/118981507/Neuroscience_for_AI_Chapter_Revisions_30.10.2023.docx?1729239274=\u0026response-content-disposition=attachment%3B+filename%3DNeuroscience_for_AI_The_importance_of_Th.docx\u0026Expires=1743344601\u0026Signature=HY2HsROAM8mUbM~NRsA-ChddgCdlDQxlduYLBk48d80ZtqdE13QqCngccQfraX-fc9VFNgf7A1P5rV5r9jenopSgT8CXYH-HtYP2S0Va-N8TPwBRJ6A03-zkpRzsR0xjFm3TokMYf8ZPrFynNoXurdSy-iAc2GlESRWxVtKKNPCHLHVlL0FhR1y1lXKYMrAB1x8zW-2LYJW6uxqdBnyesmLC412amjamQZvuXPRNAJDVhIlJdzsdw9bSO28B3aBmBh6a~cNirNYt89kBWAgOEDhZR0rTGFom873C17FxN846wNNWpc3~NHGNX9ZGxQDQqsg0G5BzVec05-1I4gbupA__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"research_interests":[{"id":161,"name":"Neuroscience","url":"https://www.academia.edu/Documents/in/Neuroscience"},{"id":465,"name":"Artificial Intelligence","url":"https://www.academia.edu/Documents/in/Artificial_Intelligence"},{"id":4937,"name":"Theory of Mind","url":"https://www.academia.edu/Documents/in/Theory_of_Mind"},{"id":20053,"name":"Theory of Mind (Psychology)","url":"https://www.academia.edu/Documents/in/Theory_of_Mind_Psychology_"},{"id":20641,"name":"Future of artificial intelligence","url":"https://www.academia.edu/Documents/in/Future_of_artificial_intelligence"},{"id":25271,"name":"Artificial General Intelligence","url":"https://www.academia.edu/Documents/in/Artificial_General_Intelligence"},{"id":74262,"name":"Philosophy of Artificial Intelligence","url":"https://www.academia.edu/Documents/in/Philosophy_of_Artificial_Intelligence"}],"urls":[{"id":45208346,"url":"https://www.sciencedirect.com/science/article/pii/S2589295924000195"}]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") if (false) { Aedu.setUpFigureCarousel('profile-work-124827501-figures'); } }); </script> <div class="js-work-strip profile--work_container" data-work-id="124827200"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/124827200/Feature_boosting_with_efficient_attention_for_scene_parsing"><img alt="Research paper thumbnail of Feature boosting with efficient attention for scene parsing" class="work-thumbnail" src="https://a.academia-assets.com/images/blank-paper.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" 
data-click-track="profile-work-strip-title" href="https://www.academia.edu/124827200/Feature_boosting_with_efficient_attention_for_scene_parsing">Feature boosting with efficient attention for scene parsing</a></div><div class="wp-workCard_item wp-workCard--coauthors"><span>by </span><span><a class="" data-click-track="profile-work-strip-authors" href="https://oxfordbrookes.academia.edu/FabioCuzzolin">Fabio Cuzzolin</a> and <a class="" data-click-track="profile-work-strip-authors" href="https://independent.academia.edu/VivekSingh2687">Vivek Singh</a></span></div><div class="wp-workCard_item"><span>Neurocomputing</span><span>, 2024</span></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">The complexity of scene parsing grows with the number of object and scene classes, which is highe...</span><a class="js-work-more-abstract" data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">The complexity of scene parsing grows with the number of object and scene classes, which is higher in unrestricted open scenes. The biggest challenge is to model the spatial relation between scene elements while succeeding in identifying objects at smaller scales. This paper presents a novel feature-boosting network that gathers spatial context from multiple levels of feature extraction and computes the attention weights for each level of representation to generate the final class labels. A novel 'channel attention module' is designed to compute the attention weights, ensuring that features from the relevant extraction stages are boosted while the others are attenuated. The model also learns spatial context information at low resolution to preserve the abstract spatial relationships among scene elements and reduce computation cost. Spatial attention is subsequently concatenated into a final feature set before applying feature boosting. Low-resolution spatial attention features are trained using an auxiliary task that helps learning a coarse global scene structure. The proposed model outperforms all state-ofthe-art models on both the ADE20K and the Cityscapes datasets.</span></div><div class="wp-workCard_item"><div class="carousel-container carousel-container--sm" id="profile-work-124827200-figures"><div class="prev-slide-container js-prev-button-container"><button aria-label="Previous" class="carousel-navigation-button js-profile-work-124827200-figures-prev"><span class="material-symbols-outlined" style="font-size: 24px" translate="no">arrow_back_ios</span></button></div><div class="slides-container js-slides-container"><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/40659802/figure-1-samples-images-from-the-adek-dataset-zhou-et-al-to"><img alt="Figure 1: Samples images from the ADE20K dataset Zhou et al. (2017) to reflect the complexity of unre- stricted natural scenes. " class="figure-slide-image" src="https://figures.academia-assets.com/118981272/figure_001.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/40659807/figure-2-complete-architecture-of-the-proposed-feature"><img alt="Figure 2: Complete architecture of the proposed Feature Boosting Network (FBNet). 
" class="figure-slide-image" src="https://figures.academia-assets.com/118981272/figure_002.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/40659813/figure-3-the-proposed-channel-attention-module-cam-used-in"><img alt="Figure 3: The proposed Channel Attention Module (CAM) used in FBNet. sification block which uses softmax to compute the class probabilities of each pixel. " class="figure-slide-image" src="https://figures.academia-assets.com/118981272/figure_003.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/40659820/figure-4-plot-of-miou-achieved-versus-number-of-parameters"><img alt="Figure 4: Plot of (a) mIOU achieved versus number of parameters for different backbones in Table 2; (b mloU value against number of parameters for all the models in Table 4. " class="figure-slide-image" src="https://figures.academia-assets.com/118981272/figure_004.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/40659831/figure-5-attention-maps-for-the-fbnet-trained-on-adek"><img alt="Figure 5: Attention maps for the FBNet trained on ADE20K dataset. First and second rows show attention maps for SAM (size: 64x86) and CAM (size: 128x171), respectively. Figure 5: Attention maps for the FBNet trained on ADE20K dataset. First and second rows show attention " class="figure-slide-image" src="https://figures.academia-assets.com/118981272/figure_005.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/40659840/figure-6-prediction-results-from-the-proposed-fbnet-against"><img alt="Figure 6: Prediction results from the proposed FBNet against the ground truth labels for a number of sample images from validation set of ADE20K dataset Zhou et al. (2017). The first row shows the original image, followed by the output of FBNet. The last row shows the ground truth label image. Figure 6: Prediction results from the proposed FBNet against the ground truth labels for a number of sample " class="figure-slide-image" src="https://figures.academia-assets.com/118981272/figure_006.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/40659848/table-1-ablation-study-evaluating-the-performance-of-fbnet"><img alt="Table 1: Ablation study evaluating the performance of FBNet under different combinations of its structural components. Table 1: Ablation study evaluating the performance of FBNet under different combinations of its structural " class="figure-slide-image" src="https://figures.academia-assets.com/118981272/table_001.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/40659857/table-2-ablation-study-showing-the-performance-of-the"><img alt="Table 2: Ablation study showing the performance of the proposed model with different state-of-the-art back- bones. " class="figure-slide-image" src="https://figures.academia-assets.com/118981272/table_002.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/40659865/table-3-comparison-of-performance-against-state-of-the-art"><img alt="Table 3: Comparison of performance against state-of-the-art segmentation models on the ADE20k dataset. 
" class="figure-slide-image" src="https://figures.academia-assets.com/118981272/table_003.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/40659873/table-4-comparison-of-performance-against-state-of-the-art"><img alt="Table 4: Comparison of performance against state-of-the-art segmentation models on the Cityscapes dataset. " class="figure-slide-image" src="https://figures.academia-assets.com/118981272/table_004.jpg" /></a></figure></div><div class="next-slide-container js-next-button-container"><button aria-label="Next" class="carousel-navigation-button js-profile-work-124827200-figures-next"><span class="material-symbols-outlined" style="font-size: 24px" translate="no">arrow_forward_ios</span></button></div></div></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="afb6c186cadd6dc2693a5b006ce3ab76" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":118981272,"asset_id":124827200,"asset_type":"Work","button_location":"profile"}" href="https://www.academia.edu/attachments/118981272/download_file?s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="124827200"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="124827200"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 124827200; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=124827200]").text(description); $(".js-view-count[data-work-id=124827200]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 124827200; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='124827200']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-a9bf3a2bc8c89fa2a77156577594264ee8a0f214d74241bc0fcd3f69f8d107ac.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (true){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "afb6c186cadd6dc2693a5b006ce3ab76" } } $('.js-work-strip[data-work-id=124827200]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":124827200,"title":"Feature boosting with 
efficient attention for scene parsing","translated_title":"","metadata":{"doi":"10.1016/j.neucom.2024.128222","volume":"601","abstract":"The complexity of scene parsing grows with the number of object and scene classes, which is higher in unrestricted open scenes. The biggest challenge is to model the spatial relation between scene elements while succeeding in identifying objects at smaller scales. This paper presents a novel feature-boosting network that gathers spatial context from multiple levels of feature extraction and computes the attention weights for each level of representation to generate the final class labels. A novel 'channel attention module' is designed to compute the attention weights, ensuring that features from the relevant extraction stages are boosted while the others are attenuated. The model also learns spatial context information at low resolution to preserve the abstract spatial relationships among scene elements and reduce computation cost. Spatial attention is subsequently concatenated into a final feature set before applying feature boosting. Low-resolution spatial attention features are trained using an auxiliary task that helps learning a coarse global scene structure. The proposed model outperforms all state-ofthe-art models on both the ADE20K and the Cityscapes datasets.","publisher":"Elsevier","page_numbers":"128222","publication_date":{"day":null,"month":null,"year":2024,"errors":{}},"publication_name":"Neurocomputing"},"translated_abstract":"The complexity of scene parsing grows with the number of object and scene classes, which is higher in unrestricted open scenes. The biggest challenge is to model the spatial relation between scene elements while succeeding in identifying objects at smaller scales. This paper presents a novel feature-boosting network that gathers spatial context from multiple levels of feature extraction and computes the attention weights for each level of representation to generate the final class labels. A novel 'channel attention module' is designed to compute the attention weights, ensuring that features from the relevant extraction stages are boosted while the others are attenuated. The model also learns spatial context information at low resolution to preserve the abstract spatial relationships among scene elements and reduce computation cost. Spatial attention is subsequently concatenated into a final feature set before applying feature boosting. Low-resolution spatial attention features are trained using an auxiliary task that helps learning a coarse global scene structure. 
The proposed model outperforms all state-ofthe-art models on both the ADE20K and the Cityscapes datasets.","internal_url":"https://www.academia.edu/124827200/Feature_boosting_with_efficient_attention_for_scene_parsing","translated_internal_url":"","created_at":"2024-10-18T01:02:39.803-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":366407,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[{"id":42564994,"work_id":124827200,"tagging_user_id":366407,"tagged_user_id":329431652,"co_author_invite_id":8265708,"email":"v***h@plymouth.ac.uk","display_order":1,"name":"Vivek Singh","title":"Feature boosting with efficient attention for scene parsing"},{"id":42564995,"work_id":124827200,"tagging_user_id":366407,"tagged_user_id":null,"co_author_invite_id":8265709,"email":"s***a@leeds.ac.uk","display_order":2,"name":"Shailza Sharma","title":"Feature boosting with efficient attention for scene parsing"}],"downloadable_attachments":[{"id":118981272,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://a.academia-assets.com/images/blank-paper.jpg","file_name":"2402.19250v1.pdf","download_url":"https://www.academia.edu/attachments/118981272/download_file","bulk_download_file_name":"Feature_boosting_with_efficient_attentio.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/118981272/2402.19250v1-libre.pdf?1729240630=\u0026response-content-disposition=attachment%3B+filename%3DFeature_boosting_with_efficient_attentio.pdf\u0026Expires=1743344601\u0026Signature=Fz94PwvkAzHav614XxMlkr8mTQ8yMg1T6N0ic95g5id94PSAOiYqcXydksjpBsFlE6ysrcAvwdunhwmWTmzRv-gPD5S-I84HMmZ3fbeMKHOYg33MpWVzjyhthN2yJwy2LfI0RugnSQhK~tZZ1uD-~~~DXxk0vNHdHmrh9CKgRbIIikF1hsJl8lEi2TPlUvtwQqD3BPcTr4363k4KaLhqs~6jZ9PQLM81ZJ8i8Csl1ZQebmotAD3V5MjBjxeOL~DiBOWUSONKTfW5EbYw0bxioY~1uTSsB4l8~HhRs8Vd7n8ApPhlHJmK5qk8sk60NusW9-vzzudcIXQFONYFJ1fF~g__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"slug":"Feature_boosting_with_efficient_attention_for_scene_parsing","translated_slug":"","page_count":null,"language":"en","content_type":"Work","summary":"The complexity of scene parsing grows with the number of object and scene classes, which is higher in unrestricted open scenes. The biggest challenge is to model the spatial relation between scene elements while succeeding in identifying objects at smaller scales. This paper presents a novel feature-boosting network that gathers spatial context from multiple levels of feature extraction and computes the attention weights for each level of representation to generate the final class labels. A novel 'channel attention module' is designed to compute the attention weights, ensuring that features from the relevant extraction stages are boosted while the others are attenuated. The model also learns spatial context information at low resolution to preserve the abstract spatial relationships among scene elements and reduce computation cost. Spatial attention is subsequently concatenated into a final feature set before applying feature boosting. Low-resolution spatial attention features are trained using an auxiliary task that helps learning a coarse global scene structure. 
The proposed model outperforms all state-ofthe-art models on both the ADE20K and the Cityscapes datasets.","owner":{"id":366407,"first_name":"Fabio","middle_initials":null,"last_name":"Cuzzolin","page_name":"FabioCuzzolin","domain_name":"oxfordbrookes","created_at":"2011-03-17T00:43:31.195-07:00","display_name":"Fabio Cuzzolin","url":"https://oxfordbrookes.academia.edu/FabioCuzzolin"},"attachments":[{"id":118981272,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://a.academia-assets.com/images/blank-paper.jpg","file_name":"2402.19250v1.pdf","download_url":"https://www.academia.edu/attachments/118981272/download_file","bulk_download_file_name":"Feature_boosting_with_efficient_attentio.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/118981272/2402.19250v1-libre.pdf?1729240630=\u0026response-content-disposition=attachment%3B+filename%3DFeature_boosting_with_efficient_attentio.pdf\u0026Expires=1743344601\u0026Signature=Fz94PwvkAzHav614XxMlkr8mTQ8yMg1T6N0ic95g5id94PSAOiYqcXydksjpBsFlE6ysrcAvwdunhwmWTmzRv-gPD5S-I84HMmZ3fbeMKHOYg33MpWVzjyhthN2yJwy2LfI0RugnSQhK~tZZ1uD-~~~DXxk0vNHdHmrh9CKgRbIIikF1hsJl8lEi2TPlUvtwQqD3BPcTr4363k4KaLhqs~6jZ9PQLM81ZJ8i8Csl1ZQebmotAD3V5MjBjxeOL~DiBOWUSONKTfW5EbYw0bxioY~1uTSsB4l8~HhRs8Vd7n8ApPhlHJmK5qk8sk60NusW9-vzzudcIXQFONYFJ1fF~g__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"research_interests":[{"id":422,"name":"Computer Science","url":"https://www.academia.edu/Documents/in/Computer_Science"},{"id":854,"name":"Computer Vision","url":"https://www.academia.edu/Documents/in/Computer_Vision"},{"id":2008,"name":"Machine Learning","url":"https://www.academia.edu/Documents/in/Machine_Learning"},{"id":26870,"name":"Image segmentation","url":"https://www.academia.edu/Documents/in/Image_segmentation"},{"id":81182,"name":"Deep Learning","url":"https://www.academia.edu/Documents/in/Deep_Learning"}],"urls":[{"id":45208236,"url":"https://www.sciencedirect.com/science/article/pii/S0925231224009937"}]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") if (true) { Aedu.setUpFigureCarousel('profile-work-124827200-figures'); } }); </script> <div class="js-work-strip profile--work_container" data-work-id="124810485"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/124810485/Uncertainty_measures_A_critical_survey"><img alt="Research paper thumbnail of Uncertainty measures: A critical survey" class="work-thumbnail" src="https://attachments.academia-assets.com/118981426/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/124810485/Uncertainty_measures_A_critical_survey">Uncertainty measures: A critical survey</a></div><div class="wp-workCard_item"><span>Information Fusion</span><span>, 2024</span></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">Classical probability is not the only mathematical theory of uncertainty, or the most general. 
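The level-wise boosting mechanism described in the abstract (per-level attention weights that amplify relevant extraction stages and attenuate the rest) can be illustrated with a minimal sketch. This is not the paper's released code: the class name FeatureBoost, the pooled-feature gate, and the softmax-over-levels fusion are assumptions made for illustration only.

```python
# Hypothetical sketch of level-wise feature boosting in the spirit of the
# FBNet abstract; module names and design details are assumptions.
import torch
import torch.nn as nn

class FeatureBoost(nn.Module):
    def __init__(self, channels: int, n_levels: int):
        super().__init__()
        # One scalar gate per extraction level, computed from pooled features.
        self.gate = nn.Sequential(
            nn.Linear(channels * n_levels, channels // 2),
            nn.ReLU(inplace=True),
            nn.Linear(channels // 2, n_levels),
        )

    def forward(self, feats: list[torch.Tensor]) -> torch.Tensor:
        # feats: list of (B, C, H, W) maps, one per extraction stage,
        # already resized to a common resolution and channel count.
        pooled = torch.cat([f.mean(dim=(2, 3)) for f in feats], dim=1)  # (B, C*L)
        weights = torch.softmax(self.gate(pooled), dim=1)               # (B, L)
        stacked = torch.stack(feats, dim=1)                             # (B, L, C, H, W)
        # Boost relevant levels, attenuate the others, then fuse by summation.
        return (weights[:, :, None, None, None] * stacked).sum(dim=1)

# Example: fuse three stages of 64-channel features at 32x32 resolution.
fb = FeatureBoost(channels=64, n_levels=3)
feats = [torch.randn(2, 64, 32, 32) for _ in range(3)]
fused = fb(feats)  # (2, 64, 32, 32)
```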
Uncertainty measures: A critical survey
Information Fusion, vol. 114, art. 102609, 2024. DOI: 10.1016/j.inffus.2024.102609

Abstract: Classical probability is not the only mathematical theory of uncertainty, nor the most general one. Many authors have argued that probability theory is ill-equipped to model 'epistemic', reducible uncertainty about the process generating the data. To address this, many alternative theories of uncertainty have been formulated. In this paper, we highlight how uncertainty theories can be seen as forming clusters characterised by a shared rationale, are connected to each other in an intricate but interesting way, and can be ranked according to their degree of generality. Our objective is to propose a structured, critical summary of the research landscape in uncertainty theory, and to discuss its potential for wider adoption in artificial intelligence.

Research interests: Statistics; Reasoning about Uncertainty; Decision Making Under Uncertainty; Probability and statistics; Probability and Mathematical Statistics; Uncertainty Quantification; Probability; Uncertainty; Uncertainty analysis; Mathematics and Statistics; Uncertainty Modeling.
URL: https://www.sciencedirect.com/science/article/pii/S1566253524003877
Credal Learning Theory
2024 Conference on Neural Information Processing Systems (NeurIPS 2024), 2024.
With Michele Caprio and Eleni Elia (The University of Sheffield).

Abstract: Statistical learning theory is the foundation of machine learning, providing theoretical bounds for the risk of models learned from a (single) training set, assumed to be issued from an unknown probability distribution. In actual deployment, however, the data distribution may (and often does) vary, causing domain adaptation and generalisation issues. In this paper we lay the foundations for a 'credal' theory of learning, using convex sets of probabilities (credal sets) to model the variability in the data-generating distribution. Such credal sets, we argue, may be inferred from a finite sample of training sets. Bounds are derived for the case of finite hypothesis spaces (both with and without the realizability assumption), as well as for infinite model spaces, directly generalising classical results.

Research interests: Artificial Intelligence; Machine Learning; Classification (Machine Learning); Reasoning about Uncertainty; Statistical machine learning; Decision Making Under Uncertainty; Future of artificial intelligence; Probability and statistics; Statistical Learning; Statistical Inference; Deep Learning; Imprecise Probability; Uncertainty analysis.
URL: https://neurips.cc/virtual/2024/poster/96268
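For context, it may help to recall the classical single-distribution results that credal bounds of this kind generalise; the following are the standard finite-hypothesis-space bounds (the paper's own credal versions are not reproduced here). In the realizable case, with probability at least \(1-\delta\) over an i.i.d. sample of size \(m\), every \(h \in \mathcal{H}\) consistent with the sample satisfies

\[ R(h) \;\le\; \frac{\ln|\mathcal{H}| + \ln(1/\delta)}{m}, \]

while in the agnostic (non-realizable) case, uniformly over \(h \in \mathcal{H}\),

\[ R(h) \;\le\; \widehat{R}(h) + \sqrt{\frac{\ln|\mathcal{H}| + \ln(2/\delta)}{2m}}, \]

where \(R\) denotes the true risk and \(\widehat{R}\) the empirical risk on the sample.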
href="https://www.academia.edu/124810326/Credal_Deep_Ensembles_for_Uncertainty_Quantification"><img alt="Research paper thumbnail of Credal Deep Ensembles for Uncertainty Quantification" class="work-thumbnail" src="https://attachments.academia-assets.com/118967032/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/124810326/Credal_Deep_Ensembles_for_Uncertainty_Quantification">Credal Deep Ensembles for Uncertainty Quantification</a></div><div class="wp-workCard_item"><span>2024 Conference on Neural Information Processing Systems (NeurIPS 2024)</span><span>, 2024</span></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">This paper introduces an innovative approach to classification called Credal Deep Ensembles (CreD...</span><a class="js-work-more-abstract" data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">This paper introduces an innovative approach to classification called Credal Deep Ensembles (CreDEs), namely, ensembles of novel Credal-Set Neural Networks (CreNets). CreNets are trained to predict a lower and an upper probability bound for each class, which, in turn, determine a convex set of probabilities (credal set) on the class set. The training employs a loss inspired by distributionally robust optimization which simulates the potential divergence of the test distribution from the training distribution, in such a way that the width of the predicted probability interval reflects the 'epistemic' uncertainty about the future data distribution. Ensembles can be constructed by training multiple CreNets, each associated with a different random seed, and averaging the outputted intervals. Extensive experiments are conducted on various out-of-distributions (OOD) detection benchmarks (CIFAR10/100 vs SVHN/Tiny-ImageNet, CIFAR10 vs CIFAR10-C, ImageNet vs ImageNet-O) and using different network architectures (ResNet50, VGG16, and ViT Base). Compared to Deep Ensemble baselines, CreDEs demonstrate higher test accuracy, lower expected calibration error, and significantly improved epistemic uncertainty estimation.</span></div><div class="wp-workCard_item"><div class="carousel-container carousel-container--sm" id="profile-work-124810326-figures"><div class="prev-slide-container js-prev-button-container"><button aria-label="Previous" class="carousel-navigation-button js-profile-work-124810326-figures-prev"><span class="material-symbols-outlined" style="font-size: 24px" translate="no">arrow_back_ios</span></button></div><div class="slides-container js-slides-container"><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658382/table-7-ood-detection-auroc-and-auprc-performance-between"><img alt="Table 7: OOD detection AUROC and AUPRC performance (%, +) between CreDEs-5 and DEs-5 based on ResNet50 using EU as uncertainty metrics on CIFAR10/100 vs. SVHN/Tiny-ImageNet anc ImageNet vs. ImageNet-O. Results are averaged over 15 runs. Best results are in bold. 
" class="figure-slide-image" src="https://figures.academia-assets.com/118967032/table_006.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658387/table-8-auroc-and-auprc-scores-for-ood-detection-on-cifario"><img alt="Table 8: AUROC and AUPRC scores (%, t) for OOD detection on CIFARIO vs SVHN/Tiny- ImageNet. Results averaged over 15 runs. The Best results are in bold. " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/table_007.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658273/figure-1-extensive-experimental-validation-is-conducted-on"><img alt="Extensive experimental validation is conducted on several OOD detection benchmarks, including CI- FAR10/100 (ID) vs SVHN/Tiny-ImageNet (OOD), CIFAR10 (ID) vs CIFAR10-C (OOD), ImageNet (ID) vs ImageNet-O (OOD), and across different network architectures: ResNet50, VGG16 and Visual Transformer Base (ViT Base). Compared to traditional Deep Ensembles, our CreDEs achieve higher test accuracy and lower expected calibration error (ECE) on ID samples, and significantly improve the quality of EU estimation. Figure 1: Comparison between the proposed Credal Deep Ensembles and traditional Deep Ensem- bles. The former aggregate a collection of credal set predictions from CreNets as the final (credal) prediction, whereas the latter average a set of single probability distributions from standard SNNs as the outcome. E.g., in the probability simplex [16] associated with the target space Y = {A, B, D} (the triangle in the figure), a probability vector (q4, ge, dp) is represented as a single point. For each CreNet, the predicted lower and upper probabilities of each class act as constraints (parallel lines) which determine a credal prediction (in gray). Single credal predictions are aggregated as in Sec. 2.4. " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/figure_001.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658283/figure-2-wou-crenet-final-layer-structure-for-three-sures"><img alt="wou) Figure 2: CreNet final layer structure for three sures classes. " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/figure_002.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658393/table-9-ood-detection-auroc-and-auprc-performance-between"><img alt="Table 9: OOD detection AUROC and AUPRC performance (%, +) between CreDEs-5 and DEs-5 based on ResNet50 using TU as uncertainty metrics on CIFAR10/100 vs. SVHN/Tiny-ImageNet and ImageNet vs. ImageNet-O. Results are averaged over 15 runs. Best results in bold. Table 10: OOD detection AUROC and AUPRC performance (%, t) between CreDEs-5 and DEs-S based on VGG16 and ViT Base using TU as uncertainty metrics on CIFAR10 vs. SVHN/Tiny ImageNet. Results are averaged over 15 runs. Best results in bold. " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/table_008.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658400/table-11-we-compare-test-accuracy-and-ece-for-des-and-credes"><img alt="We compare test accuracy and ECE for DEs*-5 and CreDEs-5 in Table 11, and their OOD detection performance on the CIFAR10/100 (ID) vs SVHN/Tiny-ImageNet (OOD) benchmark in Table 12. 
" class="figure-slide-image" src="https://figures.academia-assets.com/118967032/table_009.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658406/table-11-test-accuracy-and-ece-of-des-and-credes-on-the"><img alt="Table 11: Test accuracy and ECE of DEs*-5 and CreDEs-5 on the CIFAR10 and CIFAR100 datasets Best results in bold. " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/table_010.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658376/table-5-the-full-gh-calculation-process-is-presented-in"><img alt="The full GH(Q) calculation process is presented in Algorithm 3. Although the use of probability intervals simplifies the calculation of GH(Q) in general, a significant challenge arises for large values of C (e.g. C =100) due to the complexity of involving subsets of C’. " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/table_005.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658293/figure-5-maximum-reachable-upper-probability-max-qt-qu-per"><img alt="Figure 5: Maximum reachable upper probability max (qt, ; — qu.) per sample from 15 runs. Qualitative Evaluation Due to the high dimensionality, visualizing or directly computing the size of the credal set becomes challenging as C’ increases. Consequently, we indirectly evaluate whether ou CreDEs consistently generate nearly Dirac credal sets as predictions through the maximum attainable upper bound probability of the prediction. The closer this probability is to 1, the more it approximates a Dirac credal set. Figure 5 shows the results of ResNet50-based CreDEs-5 for the CIFAR10, SVHN and Tiny-ImageNet datasets. It verifies that our method does not consistently generate nearly Dirac credal sets, especially for OOD samples. For CIFAR10, a substantial proportion of (but not all) the credal sets are quasi-Dirac. This observation is reasonable as it is consistent with the high test accuracy of CreDEs and the low ECE reported in Table 1. " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/figure_003.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658301/figure-6-reliability-diagram-of-resnet-based-des-and-eu"><img alt="Figure 6: Reliability diagram of ResNet50-based DEs-5 and Figure 7: EU estimates compari- CreDEs-5 (using i;n4n and imax, fespectively) on CIFAR10. son of ResNet50-based models. Figure 8 compares OOD detection performance in the CIFAR1O vs CIFARIO-C setting against the intensity of corruption, using both AUPRC and AUROC as metrics. The results consistently demonstrate that CreDEs achieve higher test accuracy, lower ECE, and significantly improved epistemic uncertainty estimation, leading to enhanced OOD detection performance. " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/figure_004.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658309/table-3-test-accuracy-and-ece-of-des-and-credes-on-cifar-as"><img alt="Table 3: Test accuracy (%, +) and ECE (|) of DEs-5 and CreDEs-5 on CIFAR10 as ID dataset (left). AUROC and AUPRC scores (%, 1) for OOD detection on CIFAR10 vs SVHN/Tiny-ImageNet (right). Results averaged over 15 runs. The Best results are in bold. 
Figure 8: OOD detection on CIFAR10 vs CIFAR10-C against increased corruption intensity, using VGG16 and ViT Base as backbones. " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/figure_005.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658315/figure-9-ood-detection-on-cifar-vs-cifar-against-increased"><img alt="Figure 9: OOD detection on CIFAR10 vs CIFAR10-C against increased corruption intensity, using ResNet50, VGG16, and ViT Base as backbones. " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/figure_006.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658323/figure-10-average-time-cost-of-gh-black-dotted-line-and-gh"><img alt="Figure 10: Average time cost of GH(Q) (black dotted line) and GH(Q) value per sample across various datasets (blue lines), along with the AUROC/AUPRC scores (green/purple lines) for OOD detection versus increasing values of Ix. " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/figure_007.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658331/figure-11-average-time-cost-average-value-per-sample-and-ood"><img alt="Figure 11: Average (Q) time cost, average H(Q) value per sample, and OOD performance on the OOD detection benchmark (CIFAR100 vs. SVHN/Tiny-ImageNet) for increasing values of K. The reported time cost is measured on a single Intel Xeon Gold 8358 CPU@2.6 GHz, without optimization in the calculation process. We believe a more efficient code implementation could significantly mitigate this. " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/figure_008.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658336/table-15-ood-detection-auroc-and-auprc-performance-between"><img alt="Table 15: OOD detection AUROC and AUPRC performance (%, +) between CreDEs-5 and Bayesian models based on ResNet50 using EU and TU as uncertainty metrics on CIFAR10 vs. SVHN/Tiny- ImageNet. Results are averaged over 15 runs. The best results are in bold. The ‘drop’ denotes the dropout rate applied to MCDropout. " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/figure_009.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658341/figure-13-averaged-training-and-validation-val-accuracy-for"><img alt="Figure 13: Averaged training and validation (Val) accuracy (%) for CreNets and SNNs over 15 runs. The U and L in the labels of CreNets represent accuracies associated with upper and lower probability bounds, namely 2 imax and 2 dmins respectively. " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/figure_010.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658344/figure-14-these-alternative-methods-are-illustrated-in"><img alt="These alternative methods are illustrated in Figure 14. Figure 14: Representation of three ensemble approaches: averaging (a), union (b), and intersection (c). In each subfigure, the ultimate credal set (highlighted in dark red) is formed by aggregating two individual credal sets, each constrained by probability intervals indicated in light green and blue. respectively. 
" class="figure-slide-image" src="https://figures.academia-assets.com/118967032/figure_011.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658346/figure-15-empirical-evaluation-in-this-experiment-we-mainly"><img alt="Empirical evaluation In this experiment, we mainly evaluate the impact of averaging and union ensemble approaches on the EU estimation (GH(Q)) of CreDEs. Utilizing 15 individually trained ResNet50-based CreNets on CIFAR10 dataset, we formulate 15 CreDEs-M by varying the ensemble number M from 2 to 10 through averaging and union ensemble methodologies. Each kind of CreDEs- M is assessed for the averaged GH(Q) concerning samples and the quantity of CreDEs-M, and the averaged standard deviation (STD) of GH(Q) related to samples and the quantity of CreDEs-M. The results are plotted in Figure 15 (b) and (a), respectively. Besides, we also present the AUPRC and AUROC scores of OOD detection using GH(Q) as the uncertainty metric in Figure 15 (c) and (d), accordingly. Figure 15: Impact of averaging (Avg) and union on the EU estimation of CreDEs on OOD detectior benchmark involving CIFAR10 vs. SVHN/Tiny-ImageNet (TinyImage), implemented on ResNet5( architecture. " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/figure_012.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658350/figure-16-concept-of-credal-regressor-one-could-then-train"><img alt="Figure 16: Concept of a credal regressor. One could then train an ensemble of Bayesian regressor networks to predict a credal set with a fixed number of vertices (one network outputting one vertex probability) so that the final predicted credal set is the convex closure of those. Figure 16 illustrates the concept briefly. " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/figure_013.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658352/table-1-eu-quantification-for-ood-detection-it-is-our"><img alt="EU Quantification for OOD Detection It is our hypothesis that OOD data express a higher EU. Hence, we can use a better EU quantification as the means to improve the OOD detec- Table 1: Test accuracy (%, +) and ECE (|) of DEs-5 and CreDEs-5 using CIFAR10, CIFAR100, and ImageNet as ID datasets over 15 runs. The better performance is marked in bold. Table | reports the test accuracy and ECE for DEs-5 and CreDEs-5 on the various datasets, indicating that our CreDEs-5 achieved higher test accuracy and lower ECE on ID samples. Note that employing the imin prediction showed higher ECE on the challenging ImageNet dataset. This is likely because the strategy, selecting the class with the highest lower reachable probability, is a conservative one. Table 1: Test accuracy (%, ¢) and ECE (|) of DEs-5 and CreDEs-5 using CIFAR10, CIFAR100, and 5 i: I a - oe en << . . n -/ : a OT Fee | " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/table_001.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658357/table-2-ood-detection-auroc-and-auprc-performance-between"><img alt="Table 2: OOD detection AUROC and AUPRC performance (%, +) between CreDEs-5 and DEs-5 based on ResNet50 using EU as uncertainty metrics on CIFAR10/100 vs. SVHN/Tiny-ImageNet and ImageNet vs. ImageNet-O. Results are averaged over 15 runs. Best results in bold. 
" class="figure-slide-image" src="https://figures.academia-assets.com/118967032/table_002.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658364/table-4-test-accuracy-and-ood-detection-performance-of"><img alt="Table 4: Test accuracy (%, +) and OOD detection performance (%, +) of CreDEs-5 using various 06. Results are averaged over 15 runs. " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/table_003.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658370/table-5-averaged-eu-estimates-of-credes-using-various-model"><img alt="Table 5: Averaged EU estimates of CreDEs-5 using various 6. Model Inference Complexity Table 6 reports the parameter count and inference cost on one NVIDIA A100-SXM4-40GB GPU for a single SNN and CreNet on ImageNet. CreNets show a marginal increase in complexity due to its minor architectural modifications. More discussions on the inference and training complexity are presented in Appendix §C. " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/table_004.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658414/table-12-ood-detection-performance-comparison-of-des-and"><img alt="Table 12: OOD detection performance comparison of DEs*-5 and CreDEs-5 using the dataset pairs CIFAR10/100 (ID) vs SVHN/Tiny-ImageNet (OOD). The reported results demonstrate that CreDEs-5 outperforms DEs*-5 ensembles by achieving higher test accuracy and lower ECE values. Concerning OOD detection tasks, it can be found that CreDEs in general improve the AUPRC and AUROC scores using either the TU or the EU metric, pretty much across the board. These results suggest that CreDEs provide higher-quality EU and TU estimation. " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/table_011.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658418/table-13-test-accuracy-acc-and-ece-comparison-on-the-cifar"><img alt="Table 13: Test accuracy (ACC) (%) and ECE comparison on the CIFAR10 dataset, using the ResNet50 VGG16, and ViT Base architectures. Table 14: OOD detection performance comparison (%) on CIFAR10 vs SVHN/Tiny-ImageNet, using the ResNet50, VGG16, and ViT Base architectures. " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/table_012.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658425/table-13-credes-des-des"><img alt="CreDEs-5 95.71+0.42 97.73+0.27 DEs-5 94.80+0.43 97.26+0.29 DEs?-5 93.90+0.24 96. 1040.21 189.02+0.10 88.02+0.15| 88.80+0.19 87.21+0.29 88.03+0.20 84.11+0.32 87.05+0.80 93.36+0.42 84.50+0.49 90.78+0.35 84.10+0.22 89.83+0.16 82.14+0.14 80.81+0.16) 79.40+0.1075.91+0.14} 78.11+0.08 72.23+0.16 87.30+1.77 92.24+1.15| 79.8041.75 87.9741.17 82.41+1.56 88.51+0.95 88.17+0.44 86.94+0.60 83.81+0.81 81.67+0.89 83.21+1.02 78.2441.17 " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/table_013.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658430/table-14-in-each-iteration-we-acquire-the-samples-with-the"><img alt="In each iteration, we acquire the 5 samples with the highest reported uncertainty estimates (EU or TU per model). 
After each step, we train models using the Adam optimizer for 20 epochs and select the one with the best accuracy from the validation set. AL process stops when the training set size reaches 150. " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/table_014.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658433/table-16-complexity-comparison-between-resnet-based-snns-and"><img alt="Table 16: Complexity comparison between ResNet50-based SNNs and CreNets using CIFAR10/100 datasets. The inference cost per dataset is measured by a single NVIDIA P100 SXM2-16GB GPU for both models. Table 17: Inference cost comparison on CPU between SNNs and CreNets per single CIFAR 10 input of different architectures. " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/table_015.jpg" /></a></figure></div><div class="next-slide-container js-next-button-container"><button aria-label="Next" class="carousel-navigation-button js-profile-work-124810326-figures-next"><span class="material-symbols-outlined" style="font-size: 24px" translate="no">arrow_forward_ios</span></button></div></div></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="a719680407fb714460de77499a74a14f" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":118967032,"asset_id":124810326,"asset_type":"Work","button_location":"profile"}" href="https://www.academia.edu/attachments/118967032/download_file?s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="124810326"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="124810326"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 124810326; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=124810326]").text(description); $(".js-view-count[data-work-id=124810326]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 124810326; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='124810326']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-a9bf3a2bc8c89fa2a77156577594264ee8a0f214d74241bc0fcd3f69f8d107ac.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if 
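The Figure 14 caption above distinguishes three ways of combining the members' probability intervals. Assuming each member outputs per-class bounds, the three strategies reduce to elementwise operations, as in this sketch (the function and argument names are ours):

import numpy as np

def aggregate_intervals(lowers, uppers, mode="average"):
    # lowers, uppers: arrays of shape (M, C), one row per ensemble member.
    # average: mean of the bounds (the CreDE default);
    # union: smallest box containing every member's interval;
    # intersection: overlap of all members' intervals (may be empty).
    lowers, uppers = np.asarray(lowers), np.asarray(uppers)
    if mode == "average":
        return lowers.mean(axis=0), uppers.mean(axis=0)
    if mode == "union":
        return lowers.min(axis=0), uppers.max(axis=0)
    if mode == "intersection":
        lo, hi = lowers.max(axis=0), uppers.min(axis=0)
        if np.any(lo > hi):
            raise ValueError("empty intersection: members disagree too much")
        return lo, hi
    raise ValueError(f"unknown mode: {mode}")

# Toy usage with two members over two classes.
print(aggregate_intervals([[0.5, 0.1], [0.4, 0.2]],
                          [[0.8, 0.4], [0.7, 0.5]], "union"))

Union enlarges the resulting credal set (more cautious) while intersection shrinks it; the empirical evaluation summarized in the Figure 15 caption compares averaging against union.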
Co-authors: Kaizheng Wang, Keivan Shariatmadar, David Moens, Hans Hallez.
Topics: Artificial Intelligence; Statistics; Machine Learning; Classification (Machine Learning); Neural Networks; Statistical Machine Learning; Decision Making Under Uncertainty; Probability and Statistics; Uncertainty Quantification; Applied Probability; Artificial Neural Networks; Uncertainty; Deep Learning.
Link: https://nips.cc/virtual/2024/poster/95324
Random-Set Convolutional Neural Network (RS-CNN) for Epistemic Deep Learning
arXiv:2307.05772, 2023 (DOI: 10.48550/arXiv.2307.05772)
Abstract: Machine learning is increasingly deployed in safety-critical domains where robustness against adversarial attacks is crucial and erroneous predictions could have potentially catastrophic consequences. This highlights the need for learning systems to be able to determine a model's confidence in its prediction and the epistemic uncertainty associated with it: 'to know when a model does not know'. In this paper, we propose a novel Random-Set Convolutional Neural Network (RS-CNN) for classification which predicts belief functions, rather than probability vectors, over the set of classes, using the mathematics of random sets, i.e., distributions over the power set of the sample space. Based on the epistemic deep learning approach, random-set models can represent the 'epistemic' uncertainty induced in machine learning by limited training sets. We estimate epistemic uncertainty by approximating the size of the credal sets associated with the predicted belief functions, and experimentally demonstrate how our approach outperforms competing uncertainty-aware approaches in a classical evaluation setting. The performance of RS-CNN is best demonstrated on OOD samples, where it captures the true prediction while standard CNNs fail.
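For readers unfamiliar with the random-set vocabulary in this abstract: a belief function is induced by a mass function over subsets of the class set, and yields a lower probability (belief) and an upper probability (plausibility) for each class. The sketch below computes those standard Dempster-Shafer quantities, plus the pignistic point estimate, from a hypothetical vector of subset logits; how RS-CNN actually encodes its output over the power set is specified in the paper.

import numpy as np
from itertools import chain, combinations

def nonempty_subsets(classes):
    # All 2^C - 1 nonempty subsets of the class set, as tuples.
    return list(chain.from_iterable(
        combinations(classes, r) for r in range(1, len(classes) + 1)))

def random_set_prediction(logits, classes):
    subsets = nonempty_subsets(classes)
    m = np.exp(logits - logits.max())
    m = m / m.sum()  # softmax over subsets: a valid mass function
    # Belief of a singleton {c} is just m({c}); plausibility sums the mass
    # of every subset containing c; the pignistic probability splits each
    # subset's mass equally among its elements.
    bel = np.array([m[subsets.index((c,))] for c in classes])
    pl = np.array([sum(mi for mi, A in zip(m, subsets) if c in A)
                   for c in classes])
    betp = np.array([sum(mi / len(A) for mi, A in zip(m, subsets) if c in A)
                     for c in classes])
    return bel, pl, betp

classes = ("cat", "dog", "bird")
logits = np.random.default_rng(1).normal(size=2 ** len(classes) - 1)
bel, pl, betp = random_set_prediction(logits, classes)
print(pl - bel)  # width of the per-class probability interval: an EU proxy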
Co-authors: Shireen Kudukkil Manchingal, Muhammad Mubashar, Kaizheng Wang, Keivan Shariatmadar.
Topics: Computer Science; Artificial Intelligence; Machine Learning; Classification (Machine Learning); Reasoning about Uncertainty; Decision Making Under Uncertainty; Uncertainty Quantification; Artificial Neural Networks; Uncertainty; Deep Learning; Theory of Evidence; Belief Functions; Random Sets.
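The 'size of the credal set' used above as the epistemic uncertainty score is commonly measured by generalized Hartley (GH) nonspecificity, which for a mass function m has the closed form GH = Σ_A m(A) log2 |A| (a standard definition; the harder case of credal sets induced by probability intervals is what the Algorithm 3 mentioned in the CreDE captions addresses). A minimal sketch:

import numpy as np
from itertools import chain, combinations

def nonempty_subsets(classes):
    return list(chain.from_iterable(
        combinations(classes, r) for r in range(1, len(classes) + 1)))

def generalized_hartley(mass, subsets):
    # GH nonspecificity of a mass function: sum of m(A) * log2(|A|).
    # Zero for a Bayesian (singleton-only) mass; log2(C) for total ignorance.
    return float(sum(m * np.log2(len(A)) for m, A in zip(mass, subsets)))

classes = (0, 1, 2)
subsets = nonempty_subsets(classes)
# Vacuous belief function: all mass on the full class set (total ignorance).
vacuous = np.array([1.0 if len(A) == len(classes) else 0.0 for A in subsets])
print(generalized_hartley(vacuous, subsets))  # log2(3) ~ 1.585, the maximum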
Classification Tasks" class="work-thumbnail" src="https://attachments.academia-assets.com/111225247/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/114557668/CreINNs_Credal_Set_Interval_Neural_Networks_for_Uncertainty_Estimation_in_Classification_Tasks">CreINNs: Credal-Set Interval Neural Networks for Uncertainty Estimation in Classification Tasks</a></div><div class="wp-workCard_item"><span>arXiv:2401.05043</span><span>, 2024</span></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">Uncertainty estimation is increasingly attractive for improving the reliability of neural network...</span><a class="js-work-more-abstract" data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">Uncertainty estimation is increasingly attractive for improving the reliability of neural networks. In this work, we present novel credal-set interval neural networks (CreINNs) designed for classification tasks. CreINNs preserve the traditional interval neural network structure, capturing weight uncertainty through deterministic intervals, while forecasting credal sets using the mathematical framework of probability intervals. Experimental validations on an out-of-distribution detection benchmark (CIFAR10 vs SVHN) showcase that CreINNs outperform epistemic uncertainty estimation when compared to variational Bayesian neural networks (BNNs) and deep ensembles (DEs). 
Furthermore, CreINNs exhibit a notable reduction in computational complexity compared to variational BNNs and demonstrate smaller model sizes than DEs.</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="c9c26a15cc317bcff0c879b2f5fe8a28" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":111225247,"asset_id":114557668,"asset_type":"Work","button_location":"profile"}" href="https://www.academia.edu/attachments/111225247/download_file?s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="114557668"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="114557668"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 114557668; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=114557668]").text(description); $(".js-view-count[data-work-id=114557668]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 114557668; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='114557668']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-a9bf3a2bc8c89fa2a77156577594264ee8a0f214d74241bc0fcd3f69f8d107ac.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (true){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "c9c26a15cc317bcff0c879b2f5fe8a28" } } $('.js-work-strip[data-work-id=114557668]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":114557668,"title":"CreINNs: Credal-Set Interval Neural Networks for Uncertainty Estimation in Classification Tasks","translated_title":"","metadata":{"doi":"10.48550/arXiv.2401.05043","abstract":"Uncertainty estimation is increasingly attractive for improving the reliability of neural networks. In this work, we present novel credal-set interval neural networks (CreINNs) designed for classification tasks. CreINNs preserve the traditional interval neural network structure, capturing weight uncertainty through deterministic intervals, while forecasting credal sets using the mathematical framework of probability intervals. 
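The abstract says CreINNs keep the traditional interval neural network structure, representing weight uncertainty with deterministic intervals. A common building block of such networks is interval-arithmetic propagation through a linear layer; the center-radius sketch below gives a sound (possibly conservative) enclosure of the layer's output range, and illustrates the general technique rather than the exact CreINN formulation.

import numpy as np

def interval_linear(x_lo, x_hi, W_lo, W_hi, b_lo, b_hi):
    # Sound enclosure of { W @ x + b : W in [W_lo, W_hi],
    # x in [x_lo, x_hi], b in [b_lo, b_hi] } in center-radius form.
    xc, xr = (x_lo + x_hi) / 2.0, (x_hi - x_lo) / 2.0
    Wc, Wr = (W_lo + W_hi) / 2.0, (W_hi - W_lo) / 2.0
    yc = Wc @ xc + (b_lo + b_hi) / 2.0
    yr = np.abs(Wc) @ xr + Wr @ np.abs(xc) + Wr @ xr + (b_hi - b_lo) / 2.0
    return yc - yr, yc + yr

def interval_relu(y_lo, y_hi):
    # ReLU is monotone, so applying it to the interval endpoints is exact.
    return np.maximum(y_lo, 0.0), np.maximum(y_hi, 0.0)

# Toy usage: a 4-input, 3-output interval layer with small weight radius.
rng = np.random.default_rng(0)
W, eps = rng.normal(size=(3, 4)), 0.05
x = rng.normal(size=4)
y_lo, y_hi = interval_relu(*interval_linear(x - 0.01, x + 0.01,
                                            W - eps, W + eps,
                                            np.zeros(3), np.zeros(3)))
print(y_lo, y_hi)

Stacking such layers propagates the weight intervals to the output, where a final normalization step (as in the paper's probability-interval framework) yields the lower and upper class probabilities that delimit the predicted credal set.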

Reasoning with random sets: An agenda for the future
arXiv:2401.09435, 2023. DOI: 10.48550/arXiv.2401.09435.
In this paper, we discuss a potential agenda for future work in the theory of random sets and belief functions, touching upon a number of focal issues: the development of a fully-fledged theory of statistical reasoning with random sets, including the generalisation of logistic regression and of the classical laws of probability; the further development of the geometric approach to uncertainty, to include general random sets, a wider range of uncertainty measures and alternative geometric representations; and the application of this new theory to high-impact areas such as climate change, machine learning and statistical learning theory.
Topics: Reasoning about Uncertainty, Decision Making Under Uncertainty, Probability and Statistics, Uncertainty Quantification, Imprecise Probability, Belief Functions, Knowledge Representation and Reasoning, Random Sets, Uncertainty Modeling.
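For readers new to the formalism this agenda builds on, the classical belief-function definitions (standard Dempster-Shafer material, stated here as background rather than as a result of the paper) are as follows. Given a mass function $m : 2^\Theta \to [0,1]$ with $m(\emptyset) = 0$ and $\sum_{A \subseteq \Theta} m(A) = 1$, the belief and plausibility of an event $A \subseteq \Theta$ are

\[
Bel(A) = \sum_{B \subseteq A} m(B), \qquad
Pl(A) = \sum_{B \cap A \neq \emptyset} m(B) = 1 - Bel(\bar{A}),
\]

with $Bel(A) \le P(A) \le Pl(A)$ for every probability $P$ consistent with $m$; ordinary probability is recovered when all focal elements are singletons, which is the sense in which the classical laws of probability are a special case to be generalised.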

Credal Learning Theory
By Fabio Cuzzolin, Maryam Sultana, Eleni Elia, and Michele Caprio. arXiv:2402.00957, 2024. DOI: 10.48550/arXiv.2402.00957.
Statistical learning theory is the foundation of machine learning, providing theoretical bounds for the risk of models learnt from a (single) training set, assumed to issue from an unknown probability distribution. In actual deployment, however, the data distribution may (and often does) vary, causing domain adaptation/generalization issues. In this paper we lay the foundations for a 'credal' theory of learning, using convex sets of probabilities (credal sets) to model the variability in the data-generating distribution. Such credal sets, we argue, may be inferred from a finite sample of training sets. Bounds are derived for the case of finite hypothesis spaces (both with and without the realizability assumption), as well as for infinite model spaces, directly generalizing classical results.
[Figure 1: Graphical representation of the proposed learning framework. Given an available finite sample of training sets, each assumed to be generated by a single data distribution, one can learn a credal set P of data distributions in either a frequentist or subjectivist fashion (Section 3). This allows us to derive generalization bounds under credal uncertainty (Section 4).]
Topics: Statistics, Machine Learning, Reasoning about Uncertainty, Statistical Machine Learning, Decision Making Under Uncertainty, Statistical Learning Theory, Uncertainty, Mathematics and Statistics.
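To see what "directly generalizing classical results" refers to, recall the textbook finite-hypothesis generalization bound: with probability at least $1-\delta$ over an i.i.d. sample of size $m$ from a fixed distribution $P$,

\[
\forall h \in \mathcal{H}: \quad
R(h) \;\le\; \hat{R}_m(h) + \sqrt{\frac{\ln |\mathcal{H}| + \ln(2/\delta)}{2m}},
\qquad R(h) = \mathbb{E}_{P}\big[\ell(h(X), Y)\big].
\]

A schematic credal reading of the abstract, and only a gloss rather than the paper's actual theorem, replaces the single unknown $P$ with a credal set $\mathcal{P}$ learnt from several training sets and bounds the worst-case risk $\sup_{P \in \mathcal{P}} \mathbb{E}_{P}[\ell(h(X), Y)]$ instead.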

Semantics-Driven Generative Replay for Few-Shot Class Incremental Learning
MM '22: Proceedings of the 30th ACM International Conference on Multimedia, 2022. DOI: 10.1145/3503161.3548160.
We deal with the problem of few-shot class incremental learning (FSCIL), which requires a model to continuously recognize new categories for which limited training data are available. Existing FSCIL methods depend on prior knowledge to regularize the model parameters against catastrophic forgetting; devising an effective prior in a low-data regime, however, is not trivial. The memory-replay-based approaches from the fully supervised class incremental learning (CIL) literature cannot be used directly for FSCIL, as the generative memory-replay modules of CIL are hard to train from few samples. Yet generative replay can tackle both the stability and the plasticity of a model simultaneously by generating a large number of class-conditional samples. Motivated by this, we propose a generative-modeling-based FSCIL framework built on the memory-replay paradigm, in which a novel conditional few-shot generative adversarial network (GAN) is incrementally trained to produce visual features, ensuring the stability-plasticity trade-off through novel loss functions and effectively combating the mode-collapse problem. Furthermore, the class-specific synthesized visual features from the few-shot GAN are constrained to match the respective latent semantic prototypes obtained from a well-defined semantic space. We find that the advantages of this semantic restriction are two-fold: it helps in dealing with forgetting, while making the features class-discernible. The model requires only a single per-class prototype vector to be maintained in a dynamic memory buffer. Experimental results on the benchmark and large-scale CIFAR-100, CUB-200, and Mini-ImageNet datasets confirm the superiority of our model over the current FSCIL state of the art.
Topics: Computer Science, Artificial Intelligence, Machine Learning, Deep Learning.
URL: https://dl.acm.org/doi/abs/10.1145/3503161.3548160
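To make the replay mechanism concrete, here is a minimal sketch of a class-conditional feature generator trained with an adversarial term plus the semantic-prototype matching term described above. The module names, dimensions, placeholder critic and loss weighting are all assumptions for illustration; the paper's actual losses are more elaborate.

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

class CondFeatureGenerator(nn.Module):
    # Hypothetical conditional generator: (noise, class label) -> visual feature.
    def __init__(self, n_classes, z_dim=64, feat_dim=512):
        super().__init__()
        self.embed = nn.Embedding(n_classes, z_dim)
        self.net = nn.Sequential(
            nn.Linear(2 * z_dim, 256), nn.ReLU(),
            nn.Linear(256, feat_dim))

    def forward(self, z, y):
        return self.net(torch.cat([z, self.embed(y)], dim=1))

def generator_step_loss(gen, critic, z, y, prototypes, lam=1.0):
    feats = gen(z, y)
    adv = -critic(feats, y).mean()          # adversarial ("fool the critic") term
    sem = F.mse_loss(feats, prototypes[y])  # semantic-prototype matching term
    return adv + lam * sem

# Toy usage with a stand-in critic. Sampling y over *old* classes is what lets
# replay rehearse past categories without storing their raw data: only the
# per-class prototype vectors need to live in the memory buffer.
n_classes, feat_dim = 10, 512
gen = CondFeatureGenerator(n_classes)
prototypes = torch.randn(n_classes, feat_dim)  # one semantic prototype per class
critic = lambda f, y: f.mean(dim=1)            # placeholder critic score
z = torch.randn(8, 64)
y = torch.randint(0, n_classes, (8,))
generator_step_loss(gen, critic, z, y, prototypes).backward()
```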

Temporal DINO: A Self-supervised Video Strategy to Enhance Action Prediction
By Fabio Cuzzolin and Andrew Bradley, with Izzeddin Teeti, Vivek Singh, and Biplab Banerjee. 2023 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW), 2023. DOI: 10.1109/ICCVW60793.2023.00352.
The emerging field of action prediction (the task of forecasting action in a video sequence) plays a vital role in various computer vision applications such as autonomous driving, activity analysis and human-computer interaction. Despite significant advancements, accurately predicting future actions remains challenging due to the high dimensionality, complex dynamics and uncertainties inherent in video data. Traditional supervised approaches require large amounts of labelled data, which is expensive and time-consuming to obtain. This paper introduces a novel self-supervised video strategy for enhancing action prediction, inspired by DINO (self-distillation with no labels). The approach, named Temporal-DINO, employs two models: a 'student' processing only past frames, and a 'teacher' processing both past and future frames, enabling a broader temporal context. During training, the teacher guides the student to learn future context while observing only past frames. The strategy is evaluated on the ROAD dataset for the action prediction downstream task using 3D-ResNet, Transformer, and LSTM architectures. The experimental results showcase significant improvements in prediction performance across these architectures, with our method achieving an average enhancement of 9.9% Precision Points (PP), highlighting its effectiveness in enhancing the backbones' ability to capture long-term dependencies. Furthermore, our approach demonstrates efficiency in terms of the pretraining dataset size and the number of epochs required. The method overcomes limitations present in other approaches, including the consideration of various backbone architectures, addressing multiple prediction horizons, reducing reliance on hand-crafted augmentations, and streamlining the pretraining process into a single stage. These findings highlight the potential of our approach in diverse video-based tasks such as activity recognition, motion planning, and scene understanding. Code can be found at https://github.com/IzzeddinTeeti/ssl pred.
[Figure 1: Overview of the proposed Temporal DINO. The student model processes the past frames, while the teacher processes both the past and the future frames. A future-past distillation loss is applied to their representations to guide the student to capture the future temporal context from the teacher.]
[Table 1: Precision of different backbones with varying input lengths under the three protocols of Section 4.3.]
[Table 2: Evaluation of three common loss functions on the R3D and Swin backbones.]
[Table 4: Comparison of T-DINO with SOTA methods on human action recognition (UCF101). Models that treat spatial and temporal relationships separately (ViT+LSTM, ResNet+LSTM) outperform those that model them jointly (R3D, Swin) by 16.6 and 5.7 percentage points, respectively; the results highlight the enhanced temporal modelling offered by T-DINO on the R3D backbone.]
Topics: Computer Science, Artificial Intelligence, Computer Vision, Machine Learning, Video Processing, Artificial Neural Networks, Deep Learning, Self-Supervised Learning.
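Below is a minimal sketch of the future-past distillation idea, assuming a standard DINO-style setup: the teacher (fed past and future frames) produces targets for the student (fed past frames only), with the usual softmax sharpening, centering and exponential-moving-average teacher update. Temperatures, momentum and function names here are illustrative conventions from the DINO family, not values taken from the paper.

```python
import torch
import torch.nn.functional as F

def future_past_distillation(student_out, teacher_out, t_s=0.1, t_t=0.04, center=0.0):
    # The teacher saw past + future frames; the student saw only past frames.
    # Cross-entropy pulls the student's past-only representation toward the
    # teacher's future-aware one; the teacher side is detached and sharpened.
    p_teacher = F.softmax((teacher_out - center) / t_t, dim=-1).detach()
    log_p_student = F.log_softmax(student_out / t_s, dim=-1)
    return -(p_teacher * log_p_student).sum(dim=-1).mean()

@torch.no_grad()
def ema_update(teacher, student, m=0.996):
    # Teacher weights track the student by exponential moving average, as in DINO.
    for tp, sp in zip(teacher.parameters(), student.parameters()):
        tp.mul_(m).add_((1 - m) * sp)
```

In a training loop one would compute `future_past_distillation` on each batch, backpropagate through the student only, and then call `ema_update(teacher, student)`.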
in various computer vision applications such as autonomous driving, activity analysis and human-computer interaction. Despite significant advances, accurately predicting future actions remains challenging because of the high dimensionality, complex dynamics and uncertainty inherent in video data. Traditional supervised approaches require large amounts of labelled data, which is expensive and time-consuming to obtain. This paper introduces a novel self-supervised video strategy for enhancing action prediction, inspired by DINO (self-distillation with no labels). The approach, named Temporal-DINO, employs two models: a 'student' that processes past frames only, and a 'teacher' that processes both past and future frames and therefore enjoys a broader temporal context. During training, the teacher guides the student to learn future context while observing only past frames. The strategy is evaluated on the ROAD dataset for the action prediction downstream task using 3D-ResNet, Transformer and LSTM architectures. The experimental results show significant improvements in prediction performance across these architectures, with our method achieving an average gain of 9.9 Precision Points (PP), which highlights its effectiveness in enhancing the backbones' ability to capture long-term dependencies. Furthermore, our approach is efficient in terms of the pretraining dataset size and the number of epochs required. It overcomes limitations of other approaches by considering various backbone architectures, addressing multiple prediction horizons, reducing reliance on hand-crafted augmentations, and streamlining pretraining into a single stage. These findings highlight the potential of our approach in diverse video-based tasks such as activity recognition, motion planning and scene understanding. Code: https://github.com/IzzeddinTeeti/ssl_pred

2023 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW), 2023. With Izzeddin Teeti, Vivek Singh, Andrew Bradley and Biplab Banerjee.
https://www.academia.edu/114557325/Temporal_DINO_A_Self_supervised_Video_Strategy_to_Enhance_Action_Prediction
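To make the teacher-student scheme concrete, below is a minimal, hypothetical PyTorch sketch of a DINO-style temporal distillation step as the abstract describes it: the student encodes past frames only, the teacher encodes past plus future frames, the student is trained with a cross-entropy against the teacher's sharpened output distribution, and the teacher is updated as an exponential moving average (EMA) of the student. The names (`student`, `teacher`, `temporal_dino_loss`) and all hyperparameters are illustrative assumptions, not the authors' code.

```python
# Illustrative sketch of a DINO-style temporal distillation step (assumption:
# `student` and `teacher` are identical video encoders mapping a clip of shape
# (B, C, T, H, W) to logits of shape (B, K)). Not the authors' implementation.
import torch
import torch.nn.functional as F

@torch.no_grad()
def ema_update(teacher, student, momentum=0.996):
    # The teacher's weights track an exponential moving average of the student's.
    for pt, ps in zip(teacher.parameters(), student.parameters()):
        pt.mul_(momentum).add_(ps, alpha=1.0 - momentum)

def temporal_dino_loss(student, teacher, past, future,
                       t_student=0.1, t_teacher=0.04, center=0.0):
    # Student sees only the past clip; teacher sees past + future frames,
    # concatenated along the temporal axis (dim 2 of (B, C, T, H, W)).
    s_logits = student(past)
    with torch.no_grad():
        t_logits = teacher(torch.cat([past, future], dim=2))
        t_probs = F.softmax((t_logits - center) / t_teacher, dim=-1)
    # Cross-entropy between the teacher's targets and the student's prediction.
    return -(t_probs * F.log_softmax(s_logits / t_student, dim=-1)).sum(-1).mean()
```

In DINO, the lower teacher temperature and a running centre on the teacher logits are what prevent representation collapse; how Temporal-DINO sets these details is not specified in the abstract.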
Processing","url":"https://www.academia.edu/Documents/in/Video_Processing"},{"id":54123,"name":"Artificial Neural Networks","url":"https://www.academia.edu/Documents/in/Artificial_Neural_Networks"},{"id":81182,"name":"Deep Learning","url":"https://www.academia.edu/Documents/in/Deep_Learning"},{"id":1286170,"name":"Self-Supervised Learning","url":"https://www.academia.edu/Documents/in/Self-Supervised_Learning"}],"urls":[]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") if (true) { Aedu.setUpFigureCarousel('profile-work-114557325-figures'); } }); </script> </div><div class="profile--tab_content_container js-tab-pane tab-pane" data-section-id="Videos" id="videos"><style type="text/css">/*thumbnail*/ .video-thumbnail-container { position: relative; height: 88px !important; box-sizing: content-box; } .thumbnail-image { height: 100%; width: 100%; object-fit: cover; } .play-icon { position: absolute; width: 40px; height: 40px; top: calc(50% - 20px); left: calc(50% - 20px); } .video-duration { position: absolute; bottom: 2px; right: 2px; color: #ffffff; background-color: #000000; font-size: 12px; font-weight: 500; line-height: 12px; padding: 2px; }</style><div class="js-work-strip profile--work_container" data-video-id="24558"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" href="https://www.academia.edu/video/k9yK7k"><div class="work-thumbnail video-thumbnail-container"><img class="thumbnail-image" onerror="this.src='//a.academia-assets.com/images/videoicon.svg'" src="https://academia-edu-videos.s3.amazonaws.com/transcoded/k9yK7k/thumbnail.jpg?response-content-disposition=inline%3B%20filename%3D%22thumbnail.jpg%22%3B%20filename%2A%3DUTF-8%27%27thumbnail.jpg&response-content-type=image%2Fjpeg&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=ASIATUSBJ6BABRSCM5TP%2F20250331%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20250331T113250Z&X-Amz-Expires=20746&X-Amz-Security-Token=IQoJb3JpZ2luX2VjEDwaCXVzLWVhc3QtMSJIMEYCIQDmRsdN07nC2cE4zkwv6jUkQvgUenh0%2FGtLen0eHwPylwIhAJWpwsqdFLPRzGmfwcpj6mWfz8o3wD5HlPVnQ5FwqbsbKpYECKT%2F%2F%2F%2F%2F%2F%2F%2F%2F%2FwEQABoMMjUwMzE4ODExMjAwIgyWRw%2F7h9PNgASRUQoq6gM7aq%2B%2FZhugiLodDyBK9fLzytW6f5C6Ko%2B9m267gmOZdWFqKMCugNyf1E4Yp3We0ZxlEZtmusBqcyAW4mk3iKwUqu9uEzuz9uBpr86O%2BYMqQIPTPwQlQaiKw%2BieCoLx3Ll11vADuZ%2Bx4m652TQVTGWM30Tq%2BP1ulxRpIDhMfnecCtVdJxizxE4x%2F5yvUqB9Yo0uAhX5sII1uqUCeLAmVdh1EyQo5H911N9eQTwsnteSKEedeo6N%2Fk8HgFREBt8ns0iD1TOJF1oDGigLGJN%2B7897ZM53DzQZzMINbSsde%2FIpNlgUmW1oONsJsskAyIpr1ngMIP%2BkZd6HGnPkUkwkS7gI1gSEuKdjerEq0LOIT3kEnEsjzCv7AAMxcjgqGyJC7HjoIOVSG%2FlLj66G%2Fyr3iSNbl%2FwgQz4cURfUg4AvmKO5xsdosqem6upFxkR7Artmyatp%2BCj2llT1BZ40yPH6MaM1%2B7XxxDNIdkm9yGlHW4Cfua%2BtZHk1%2FM%2Fz%2FbPn7kOkQHgi%2FdnrAdijahqtANp0b8bNw7ITFOaSX7zq0AnO6VJncuagyL7oRmvOc%2Be5HeoxyCPqg02%2Fzi%2Bc9fG9ityH36fVcUVuZOZu6n8dRSJEaE752BOqXtxf3z5vM1VxRjoCgu%2BTak7YLiDB1VuDMIz0qb8GOqQBT0hSRbUCp%2BieUnSgQ4VSt4G4OlKTFmz%2B3%2BdBpDbGWWGa87C3VyRyWd9UEiqt3mil%2Bm865z03aorT1JTe64zIBA468A4pvqXnsYTctihP3nhZ4CQSod8KB7jGdeE6rIrxaudtmnzHvLcLdJyLqfZRu4FhK3i1ebhecFj4bOWcTIMLidmXf6hsfS3p1%2FElXZXdMguzjWLoCbpPc9LIyEsA8B7pOEk%3D&X-Amz-SignedHeaders=host&X-Amz-Signature=4d73e33888bf11b9f283e4f73b164bfa8e3bb81b5acb8742bad9da7a2c619f18" /><img alt="Play" class="play-icon" src="//a.academia-assets.com/images/video-play-icon.svg" /><div class="video-duration">44:27</div></div></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item 
wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" href="https://www.academia.edu/video/k9yK7k">The ROAD event awareness dataset for autonomous driving</a></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">Invited talk @ DeepView: ``Global Multi-Target Visual Surveillance Based on Real-Time Large-Scale...</span><a class="js-work-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">Invited talk @ DeepView: ``Global Multi-Target Visual Surveillance Based on Real-Time Large-Scale Analysis", AVSS 2021, Nov 16 2021 <a href="https://sites.google.com/view/deepview2021/" rel="nofollow">https://sites.google.com/view/deepview2021/</a> <br /> <br />Autonomous vehicles (AVs) employ a variety of sensors to identify roadside infrastructure and other road users, with much of the existing work focusing on scene understanding and robust object detection. Human drivers, however, approach the driving task in a more holistic fashion which entails, in particular, recognising and understanding the evolution of road events. Testing an AV’s capability to recognise the actions undertaken by other road agents is thus crucial to improve their situational awareness and facilitate decision making. <br />In this talk we introduce the ROad event Awareness Dataset (ROAD) for Autonomous Driving, to our knowledge the first of its kind. ROAD is explicitly designed to test the ability of an autonomous vehicle to detect road events.</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-video-id="k9yK7k"><a class="js-profile-work-strip-edit-button" href="https://oxfordbrookes.academia.edu/video/edit/k9yK7k" rel="nofollow" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-video-id="k9yK7k">28 views</span></span></span></div></div></div><style type="text/css">/*thumbnail*/ .video-thumbnail-container { position: relative; height: 88px !important; box-sizing: content-box; } .thumbnail-image { height: 100%; width: 100%; object-fit: cover; } .play-icon { position: absolute; width: 40px; height: 40px; top: calc(50% - 20px); left: calc(50% - 20px); } .video-duration { position: absolute; bottom: 2px; right: 2px; color: #ffffff; background-color: #000000; font-size: 12px; font-weight: 500; line-height: 12px; padding: 2px; }</style><div class="js-work-strip profile--work_container" data-video-id="24563"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" href="https://www.academia.edu/video/jYADbj"><div class="work-thumbnail video-thumbnail-container"><img class="thumbnail-image" onerror="this.src='//a.academia-assets.com/images/videoicon.svg'" 
src="https://academia-edu-videos.s3.amazonaws.com/transcoded/jYADbj/thumbnail.jpg?response-content-disposition=inline%3B%20filename%3D%22thumbnail.jpg%22%3B%20filename%2A%3DUTF-8%27%27thumbnail.jpg&response-content-type=image%2Fjpeg&X-Amz-Algorithm=AWS4-HMAC-SHA256&X-Amz-Credential=ASIATUSBJ6BABRSCM5TP%2F20250331%2Fus-east-1%2Fs3%2Faws4_request&X-Amz-Date=20250331T113250Z&X-Amz-Expires=20746&X-Amz-Security-Token=IQoJb3JpZ2luX2VjEDwaCXVzLWVhc3QtMSJIMEYCIQDmRsdN07nC2cE4zkwv6jUkQvgUenh0%2FGtLen0eHwPylwIhAJWpwsqdFLPRzGmfwcpj6mWfz8o3wD5HlPVnQ5FwqbsbKpYECKT%2F%2F%2F%2F%2F%2F%2F%2F%2F%2FwEQABoMMjUwMzE4ODExMjAwIgyWRw%2F7h9PNgASRUQoq6gM7aq%2B%2FZhugiLodDyBK9fLzytW6f5C6Ko%2B9m267gmOZdWFqKMCugNyf1E4Yp3We0ZxlEZtmusBqcyAW4mk3iKwUqu9uEzuz9uBpr86O%2BYMqQIPTPwQlQaiKw%2BieCoLx3Ll11vADuZ%2Bx4m652TQVTGWM30Tq%2BP1ulxRpIDhMfnecCtVdJxizxE4x%2F5yvUqB9Yo0uAhX5sII1uqUCeLAmVdh1EyQo5H911N9eQTwsnteSKEedeo6N%2Fk8HgFREBt8ns0iD1TOJF1oDGigLGJN%2B7897ZM53DzQZzMINbSsde%2FIpNlgUmW1oONsJsskAyIpr1ngMIP%2BkZd6HGnPkUkwkS7gI1gSEuKdjerEq0LOIT3kEnEsjzCv7AAMxcjgqGyJC7HjoIOVSG%2FlLj66G%2Fyr3iSNbl%2FwgQz4cURfUg4AvmKO5xsdosqem6upFxkR7Artmyatp%2BCj2llT1BZ40yPH6MaM1%2B7XxxDNIdkm9yGlHW4Cfua%2BtZHk1%2FM%2Fz%2FbPn7kOkQHgi%2FdnrAdijahqtANp0b8bNw7ITFOaSX7zq0AnO6VJncuagyL7oRmvOc%2Be5HeoxyCPqg02%2Fzi%2Bc9fG9ityH36fVcUVuZOZu6n8dRSJEaE752BOqXtxf3z5vM1VxRjoCgu%2BTak7YLiDB1VuDMIz0qb8GOqQBT0hSRbUCp%2BieUnSgQ4VSt4G4OlKTFmz%2B3%2BdBpDbGWWGa87C3VyRyWd9UEiqt3mil%2Bm865z03aorT1JTe64zIBA468A4pvqXnsYTctihP3nhZ4CQSod8KB7jGdeE6rIrxaudtmnzHvLcLdJyLqfZRu4FhK3i1ebhecFj4bOWcTIMLidmXf6hsfS3p1%2FElXZXdMguzjWLoCbpPc9LIyEsA8B7pOEk%3D&X-Amz-SignedHeaders=host&X-Amz-Signature=4186090485480959bdb124c4a74877ee5d0f3c6cb98504ca6c1989b84017db21" /><img alt="Play" class="play-icon" src="//a.academia-assets.com/images/video-play-icon.svg" /><div class="video-duration">01:24:32</div></div></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" href="https://www.academia.edu/video/jYADbj">Belief functions: past, present and future</a></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">Invited seminar, Department of Statistics, Harvard University, 2016 The theory of belief funct...</span><a class="js-work-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">Invited seminar, Department of Statistics, Harvard University, 2016 <br /> <br />The theory of belief functions, sometimes referred to as evidence theory or Dempster-Shafer theory, was first introduced by Arthur P. Dempster in the context of statistical inference, to be later developed by Glenn Shafer as a general framework for modelling epistemic uncertainty. The methodology is now well established as a general framework for reasoning with uncertainty, with well-understood connections to related frameworks such as probability, possibility, random set and imprecise probability theories. 
Papers

Epistemic Artificial Intelligence: Using random sets to quantify uncertainty in machine learning
Data Science for Econometrics and Related Topics, 2025. With Shireen Kudukkil Manchingal and Salman Khan.
Quantifying uncertainty is fundamental in machine learning tasks, including classification and detection in complex domains such as computer vision (CV), and text generation in large language models (LLMs). It is especially crucial when artificial intelligence (AI) is used in safety-critical applications such as autonomous driving or medical diagnosis, where reliable decisions are needed to prevent serious consequences. The Epistemic AI project explores the use of random sets for quantifying epistemic uncertainty in AI. A mathematical framework which generalises the concept of a random variable to sets, random sets enable a more flexible and expressive approach to uncertainty modelling. This work proposes ways to employ the random-set formalism to model classification uncertainty over both the target and parameter spaces of a machine learning model (e.g., a neural network), as well as detection uncertainty, within the context of computer vision. The applicability and effectiveness of random sets is also demonstrated in large language models, where they can be used to model uncertainty in natural language processing tasks. We show how, by leveraging random set theory, machine learning models can achieve enhanced robustness, interpretability and reliability while effectively modelling uncertainty.
https://www.academia.edu/127272558/Epistemic_Artificial_Intelligence_Using_random_sets_to_quantify_uncertainty_in_machine_learning
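One concrete way to realise a random-set prediction in classification, consistent with the abstract's description (this is our illustrative reading, not the paper's exact construction), is to have the model output a mass function over a small family of focal sets of classes instead of a single softmax, so that ignorance can be expressed by assigning mass to non-singleton sets:

```python
# Illustrative random-set classifier head (our reading of the idea, not the
# paper's exact construction): predict a mass function over focal sets of
# classes, then report the belief interval [bel, pl] for each class.
import torch
import torch.nn as nn
import torch.nn.functional as F

CLASSES = ("cat", "dog", "bird")
# Focal sets: the singletons plus the full frame (explicit ignorance).
FOCAL_SETS = [frozenset({c}) for c in CLASSES] + [frozenset(CLASSES)]

class RandomSetHead(nn.Module):
    def __init__(self, in_dim):
        super().__init__()
        self.fc = nn.Linear(in_dim, len(FOCAL_SETS))

    def forward(self, features):
        # Softmax over focal sets yields a valid mass function
        # (non-negative masses summing to 1).
        return F.softmax(self.fc(features), dim=-1)

def class_intervals(masses):
    # For each class c: bel({c}) = m({c}); pl({c}) = sum of masses of sets containing c.
    out = {}
    for c in CLASSES:
        bel = sum(m for S, m in zip(FOCAL_SETS, masses) if S == frozenset({c}))
        pl = sum(m for S, m in zip(FOCAL_SETS, masses) if c in S)
        out[c] = (float(bel), float(pl))
    return out

head = RandomSetHead(in_dim=16)
masses = head(torch.randn(16))           # single example
print(class_intervals(masses.tolist()))  # per-class [belief, plausibility]
```

The width pl - bel for a class then quantifies epistemic uncertainty: a network that is genuinely unsure can push mass onto the full frame rather than being forced to commit to a singleton.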
EPISTEMIC ARTIFICIAL INTELLIGENCE: Using random sets to quantify uncertainty in machine learning
Invited paper (PDF, 28 pp.); the abstract is identical to the preceding entry.
https://www.academia.edu/127272448/EPISTEMIC_ARTIFICIAL_INTELLIGENCE_Using_random_sets_to_quantify_uncertainty_in_machine_learning
data-click-track="profile-work-strip-authors" href="https://ugent.academia.edu/kshariat">Keivan Sh</a></span></div><div class="wp-workCard_item"><span>Machine Learning</span></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">Uncertainty estimation is increasingly attractive for improving the reliability of neural network...</span><a class="js-work-more-abstract" data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">Uncertainty estimation is increasingly attractive for improving the reliability of neural networks. In this work, we present novel credal-set interval neural networks (CreINNs) designed for classification tasks. CreINNs preserve the traditional interval neural network structure, capturing weight uncertainty through deterministic intervals, while forecasting credal sets using the mathematical framework of probability intervals. Experimental validations on an out-of-distribution detection benchmark (CIFAR10 vs SVHN) showcase that CreINNs outperform epistemic uncertainty estimation when compared to variational Bayesian neural networks (BNNs) and deep ensembles (DEs). Furthermore, CreINNs exhibit a notable reduction in computational complexity compared to variational BNNs and demonstrate smaller model sizes than DEs.</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="84398b4e39a9cf3140787369d21d82e2" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":121029070,"asset_id":127272380,"asset_type":"Work","button_location":"profile"}" href="https://www.academia.edu/attachments/121029070/download_file?s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="127272380"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="127272380"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 127272380; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=127272380]").text(description); $(".js-view-count[data-work-id=127272380]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 127272380; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='127272380']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 
})(["https://a.academia-assets.com/assets/wow_profile-a9bf3a2bc8c89fa2a77156577594264ee8a0f214d74241bc0fcd3f69f8d107ac.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (true){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "84398b4e39a9cf3140787369d21d82e2" } } $('.js-work-strip[data-work-id=127272380]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":127272380,"title":"CreINNs: Credal-Set Interval Neural Networks for Uncertainty Estimation in Classification Tasks","translated_title":"","metadata":{"abstract":"Uncertainty estimation is increasingly attractive for improving the reliability of neural networks. In this work, we present novel credal-set interval neural networks (CreINNs) designed for classification tasks. CreINNs preserve the traditional interval neural network structure, capturing weight uncertainty through deterministic intervals, while forecasting credal sets using the mathematical framework of probability intervals. Experimental validations on an out-of-distribution detection benchmark (CIFAR10 vs SVHN) showcase that CreINNs outperform epistemic uncertainty estimation when compared to variational Bayesian neural networks (BNNs) and deep ensembles (DEs). Furthermore, CreINNs exhibit a notable reduction in computational complexity compared to variational BNNs and demonstrate smaller model sizes than DEs.","ai_title_tag":"Credal-Set Interval Neural Networks for Uncertainty","publication_name":"Machine Learning"},"translated_abstract":"Uncertainty estimation is increasingly attractive for improving the reliability of neural networks. In this work, we present novel credal-set interval neural networks (CreINNs) designed for classification tasks. CreINNs preserve the traditional interval neural network structure, capturing weight uncertainty through deterministic intervals, while forecasting credal sets using the mathematical framework of probability intervals. Experimental validations on an out-of-distribution detection benchmark (CIFAR10 vs SVHN) showcase that CreINNs outperform epistemic uncertainty estimation when compared to variational Bayesian neural networks (BNNs) and deep ensembles (DEs). 
Furthermore, CreINNs exhibit a notable reduction in computational complexity compared to variational BNNs and demonstrate smaller model sizes than DEs.","internal_url":"https://www.academia.edu/127272380/CreINNs_Credal_Set_Interval_Neural_Networks_for_Uncertainty_Estimation_in_Classification_Tasks","translated_internal_url":"","created_at":"2025-01-26T07:43:04.675-08:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":366407,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[{"id":43025074,"work_id":127272380,"tagging_user_id":366407,"tagged_user_id":null,"co_author_invite_id":8053052,"email":"k***g@kuleuven.be","display_order":1,"name":"Kaizheng Wang","title":"CreINNs: Credal-Set Interval Neural Networks for Uncertainty Estimation in Classification Tasks"},{"id":43025075,"work_id":127272380,"tagging_user_id":366407,"tagged_user_id":null,"co_author_invite_id":8053053,"email":"k***r@kuleuven.be","display_order":2,"name":"Keivan Shariatmadar","title":"CreINNs: Credal-Set Interval Neural Networks for Uncertainty Estimation in Classification Tasks"},{"id":43025076,"work_id":127272380,"tagging_user_id":366407,"tagged_user_id":null,"co_author_invite_id":8053054,"email":"d***s@kuleuven.be","display_order":4,"name":"David Moens","title":"CreINNs: Credal-Set Interval Neural Networks for Uncertainty Estimation in Classification Tasks"},{"id":43025077,"work_id":127272380,"tagging_user_id":366407,"tagged_user_id":null,"co_author_invite_id":4480510,"email":"h***z@kuleuven.be","display_order":5,"name":"Hans Hallez","title":"CreINNs: Credal-Set Interval Neural Networks for Uncertainty Estimation in Classification Tasks"},{"id":43025078,"work_id":127272380,"tagging_user_id":366407,"tagged_user_id":538842,"co_author_invite_id":null,"email":"k***t@gmail.com","affiliation":"Ghent University","display_order":6,"name":"Keivan Sh","title":"CreINNs: Credal-Set Interval Neural Networks for Uncertainty Estimation in Classification Tasks"}],"downloadable_attachments":[{"id":121029070,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/121029070/thumbnails/1.jpg","file_name":"2401.05043.pdf","download_url":"https://www.academia.edu/attachments/121029070/download_file","bulk_download_file_name":"CreINNs_Credal_Set_Interval_Neural_Netwo.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/121029070/2401.05043-libre.pdf?1737909166=\u0026response-content-disposition=attachment%3B+filename%3DCreINNs_Credal_Set_Interval_Neural_Netwo.pdf\u0026Expires=1743344601\u0026Signature=ZNQZ~CITPyzRuvgVCWZNV5PEdoI2rwi5IQW4~NjzL0JnUmngk8YnWV3LrnNeZdjQ0fbfz14atGC4FQ5M-3L36E408iendC~14WCm87iBWWHInF3SF6698kfVCUBuVkwauu~HnUjrKxhjtlKERXtdxhI9FTwzrtFJJHrLkfuOTpMuFBNbsUucn~IZFeKwMyabWVzYYLmfnEREzVypYqc9lX6jjSvVecn4icCs0lE-b6QClRAzrr4MRRrJV3h78NjHmL09FeyyGJnpGua3Hinmk64muwLGNRpSOtQhAXuO7YfKVcbUUs1GnEHoCSSLrFeoC~Gh-p-K1CjqwxOBav4Myg__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"slug":"CreINNs_Credal_Set_Interval_Neural_Networks_for_Uncertainty_Estimation_in_Classification_Tasks","translated_slug":"","page_count":11,"language":"en","content_type":"Work","summary":"Uncertainty estimation is increasingly attractive for improving the reliability of neural networks. In this work, we present novel credal-set interval neural networks (CreINNs) designed for classification tasks. 
CreINNs preserve the traditional interval neural network structure, capturing weight uncertainty through deterministic intervals, while forecasting credal sets using the mathematical framework of probability intervals. Experimental validations on an out-of-distribution detection benchmark (CIFAR10 vs SVHN) showcase that CreINNs outperform epistemic uncertainty estimation when compared to variational Bayesian neural networks (BNNs) and deep ensembles (DEs). Furthermore, CreINNs exhibit a notable reduction in computational complexity compared to variational BNNs and demonstrate smaller model sizes than DEs.","owner":{"id":366407,"first_name":"Fabio","middle_initials":null,"last_name":"Cuzzolin","page_name":"FabioCuzzolin","domain_name":"oxfordbrookes","created_at":"2011-03-17T00:43:31.195-07:00","display_name":"Fabio Cuzzolin","url":"https://oxfordbrookes.academia.edu/FabioCuzzolin"},"attachments":[{"id":121029070,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/121029070/thumbnails/1.jpg","file_name":"2401.05043.pdf","download_url":"https://www.academia.edu/attachments/121029070/download_file","bulk_download_file_name":"CreINNs_Credal_Set_Interval_Neural_Netwo.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/121029070/2401.05043-libre.pdf?1737909166=\u0026response-content-disposition=attachment%3B+filename%3DCreINNs_Credal_Set_Interval_Neural_Netwo.pdf\u0026Expires=1743344601\u0026Signature=ZNQZ~CITPyzRuvgVCWZNV5PEdoI2rwi5IQW4~NjzL0JnUmngk8YnWV3LrnNeZdjQ0fbfz14atGC4FQ5M-3L36E408iendC~14WCm87iBWWHInF3SF6698kfVCUBuVkwauu~HnUjrKxhjtlKERXtdxhI9FTwzrtFJJHrLkfuOTpMuFBNbsUucn~IZFeKwMyabWVzYYLmfnEREzVypYqc9lX6jjSvVecn4icCs0lE-b6QClRAzrr4MRRrJV3h78NjHmL09FeyyGJnpGua3Hinmk64muwLGNRpSOtQhAXuO7YfKVcbUUs1GnEHoCSSLrFeoC~Gh-p-K1CjqwxOBav4Myg__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"research_interests":[{"id":465,"name":"Artificial Intelligence","url":"https://www.academia.edu/Documents/in/Artificial_Intelligence"},{"id":2008,"name":"Machine Learning","url":"https://www.academia.edu/Documents/in/Machine_Learning"},{"id":11598,"name":"Neural Networks","url":"https://www.academia.edu/Documents/in/Neural_Networks"},{"id":15084,"name":"Statistical machine learning","url":"https://www.academia.edu/Documents/in/Statistical_machine_learning"},{"id":16097,"name":"Decision Making Under Uncertainty","url":"https://www.academia.edu/Documents/in/Decision_Making_Under_Uncertainty"},{"id":22613,"name":"Probability and statistics","url":"https://www.academia.edu/Documents/in/Probability_and_statistics"},{"id":31477,"name":"Uncertainty Quantification","url":"https://www.academia.edu/Documents/in/Uncertainty_Quantification"},{"id":41815,"name":"Applied Probability","url":"https://www.academia.edu/Documents/in/Applied_Probability"},{"id":54123,"name":"Artificial Neural Networks","url":"https://www.academia.edu/Documents/in/Artificial_Neural_Networks"},{"id":61603,"name":"Uncertainty","url":"https://www.academia.edu/Documents/in/Uncertainty"},{"id":81182,"name":"Deep Learning","url":"https://www.academia.edu/Documents/in/Deep_Learning"},{"id":177212,"name":"Interval analysis","url":"https://www.academia.edu/Documents/in/Interval_analysis"}],"urls":[]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") if (false) { Aedu.setUpFigureCarousel('profile-work-127272380-figures'); } }); </script> <div class="js-work-strip profile--work_container" data-work-id="127272278"><div 
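The abstract's phrase "capturing weight uncertainty through deterministic intervals" means each weight is a pair [w_lo, w_hi], and the forward pass propagates bounds by interval arithmetic. A minimal sketch of one interval linear layer follows (illustrative only; the paper's actual layer design may differ):

```python
# Illustrative interval linear layer: weights live in intervals [w_lo, w_hi],
# and for a fixed (point-valued) input x the output bounds follow from
# interval arithmetic. A sketch of the general idea, not CreINNs' exact layer.
import numpy as np

def interval_linear(x, w_lo, w_hi, b_lo, b_hi):
    # Split the input by sign so each product picks the correct interval end:
    # a positive x_j attains its minimum with w_lo, a negative x_j with w_hi.
    x_pos, x_neg = np.maximum(x, 0.0), np.minimum(x, 0.0)
    y_lo = w_lo @ x_pos + w_hi @ x_neg + b_lo
    y_hi = w_hi @ x_pos + w_lo @ x_neg + b_hi
    return y_lo, y_hi  # guaranteed bounds on Wx + b for all W, b in the intervals

rng = np.random.default_rng(0)
w = rng.normal(size=(4, 8)); eps = 0.05  # interval half-width (assumed value)
y_lo, y_hi = interval_linear(rng.normal(size=8),
                             w - eps, w + eps,
                             np.zeros(4) - eps, np.zeros(4) + eps)
assert np.all(y_lo <= y_hi)
```

Stacking such layers with monotone activations keeps the bounds valid; at the output, the interval-valued class scores can be read as probability intervals, which in turn determine the credal set of distributions the network forecasts.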
class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/127272278/Analyzing_the_Feasibility_of_Achieving_Machine_Theory_of_Mind_through_ToMnet_like_Approaches"><img alt="Research paper thumbnail of Analyzing the Feasibility of Achieving Machine Theory of Mind through ToMnet-like Approaches" class="work-thumbnail" src="https://attachments.academia-assets.com/121029000/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/127272278/Analyzing_the_Feasibility_of_Achieving_Machine_Theory_of_Mind_through_ToMnet_like_Approaches">Analyzing the Feasibility of Achieving Machine Theory of Mind through ToMnet-like Approaches</a></div><div class="wp-workCard_item"><span>Workshop on Advancing Artificial Intelligence through Theory of Mind (ToM4AI)</span><span>, 2025</span></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">This paper critically examines the efficacy of the ToMnet family of models in achieving a genuine...</span><a class="js-work-more-abstract" data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">This paper critically examines the efficacy of the ToMnet family of models in achieving a genuine Machine Theory of Mind (ToM). We introduce ToMnet-N, an open-source evolution of ToMnet+, developed to replicate and extend previous experiments while addressing architectural limitations. Our analysis reveals that ToMnet-N's success in predictive tasks stems primarily from pattern recognition within training data rather than true mental state understanding. These findings question the validity of ToMnet-like approaches in modeling authentic ToM capabilities and suggest that a paradigm shift may be necessary. 
This work contributes to the field of Computational ToM by providing a comprehensive evaluation and proposing directions for future research.</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="e65279cddcac522951f0ae9c9d1a9333" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":121029000,"asset_id":127272278,"asset_type":"Work","button_location":"profile"}" href="https://www.academia.edu/attachments/121029000/download_file?s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="127272278"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="127272278"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 127272278; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=127272278]").text(description); $(".js-view-count[data-work-id=127272278]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 127272278; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='127272278']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-a9bf3a2bc8c89fa2a77156577594264ee8a0f214d74241bc0fcd3f69f8d107ac.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (true){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "e65279cddcac522951f0ae9c9d1a9333" } } $('.js-work-strip[data-work-id=127272278]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":127272278,"title":"Analyzing the Feasibility of Achieving Machine Theory of Mind through ToMnet-like Approaches","translated_title":"","metadata":{"abstract":"This paper critically examines the efficacy of the ToMnet family of models in achieving a genuine Machine Theory of Mind (ToM). We introduce ToMnet-N, an open-source evolution of ToMnet+, developed to replicate and extend previous experiments while addressing architectural limitations. Our analysis reveals that ToMnet-N's success in predictive tasks stems primarily from pattern recognition within training data rather than true mental state understanding. 
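For context, a ToMnet-style model (in the spirit of the original ToMnet architecture; the sketch below is our simplified, hypothetical rendering, not ToMnet-N itself) embeds an agent's past behaviour into a "character" vector and conditions an action predictor on it. The paper's argument is that what such a pipeline learns is closer to trajectory pattern-matching than to mental-state inference.

```python
# Simplified ToMnet-style predictor (illustrative sketch, not ToMnet-N):
# a character net embeds past episodes; a prediction net, conditioned on that
# embedding and the current state, outputs logits over the next action.
import torch
import torch.nn as nn

class CharacterNet(nn.Module):
    def __init__(self, obs_dim, emb_dim=32):
        super().__init__()
        self.rnn = nn.GRU(obs_dim, emb_dim, batch_first=True)

    def forward(self, past_episodes):  # (B, T, obs_dim)
        _, h = self.rnn(past_episodes)
        return h[-1]                   # (B, emb_dim) "character" embedding

class PredictionNet(nn.Module):
    def __init__(self, obs_dim, emb_dim=32, n_actions=5):
        super().__init__()
        self.mlp = nn.Sequential(
            nn.Linear(obs_dim + emb_dim, 64), nn.ReLU(),
            nn.Linear(64, n_actions))

    def forward(self, state, character):  # (B, obs_dim), (B, emb_dim)
        return self.mlp(torch.cat([state, character], dim=-1))  # action logits

char_net, pred_net = CharacterNet(obs_dim=10), PredictionNet(obs_dim=10)
e_char = char_net(torch.randn(4, 20, 10))      # embed 20 past steps
logits = pred_net(torch.randn(4, 10), e_char)  # predict the next action
```

Trained purely with a cross-entropy on observed actions, nothing in this objective forces the character embedding to encode beliefs or desires, which is essentially the concern the abstract raises.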
These findings question the validity of ToMnet-like approaches in modeling authentic ToM capabilities and suggest that a paradigm shift may be necessary. This work contributes to the field of Computational ToM by providing a comprehensive evaluation and proposing directions for future research.","publication_date":{"day":null,"month":null,"year":2025,"errors":{}},"publication_name":"Workshop on Advancing Artificial Intelligence through Theory of Mind (ToM4AI)"},"translated_abstract":"This paper critically examines the efficacy of the ToMnet family of models in achieving a genuine Machine Theory of Mind (ToM). We introduce ToMnet-N, an open-source evolution of ToMnet+, developed to replicate and extend previous experiments while addressing architectural limitations. Our analysis reveals that ToMnet-N's success in predictive tasks stems primarily from pattern recognition within training data rather than true mental state understanding. These findings question the validity of ToMnet-like approaches in modeling authentic ToM capabilities and suggest that a paradigm shift may be necessary. This work contributes to the field of Computational ToM by providing a comprehensive evaluation and proposing directions for future research.","internal_url":"https://www.academia.edu/127272278/Analyzing_the_Feasibility_of_Achieving_Machine_Theory_of_Mind_through_ToMnet_like_Approaches","translated_internal_url":"","created_at":"2025-01-26T07:37:59.029-08:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":366407,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[],"downloadable_attachments":[{"id":121029000,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/121029000/thumbnails/1.jpg","file_name":"1_Analyzing_the_Feasibility_of.pdf","download_url":"https://www.academia.edu/attachments/121029000/download_file","bulk_download_file_name":"Analyzing_the_Feasibility_of_Achieving_M.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/121029000/1_Analyzing_the_Feasibility_of-libre.pdf?1737909164=\u0026response-content-disposition=attachment%3B+filename%3DAnalyzing_the_Feasibility_of_Achieving_M.pdf\u0026Expires=1743344601\u0026Signature=O5EUIY~Ku03H8TV9vjrFBt1UBDecReMp-RQbtb18rggHFAT6DiRC-byfIhd~qNXOOFSTsIeD1g-~sNM~RcF6t3m-dyvh7kMEg7NQwOpozb1gAPLwI2-vMN01d9ubuFbxQAsD-9keytRfAlnExSEjCx7uqJH8YjuGFuv2xZ3~TvGJh2J-JdRJ0HeyflDSoSL5xEpm6w4MvrQjUa~71nUGmCfkToT10usQpQlfrGpoxe5G6g6nJvKP3vWmHuaZFtVljDC6ofhWM2omiEP0LgF9Xx0pGEX33rWT56IkAkd8oOWAln~MxLQg1~dSmozTA3kh1k63GZzU6s-~uIKSh08pMw__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"slug":"Analyzing_the_Feasibility_of_Achieving_Machine_Theory_of_Mind_through_ToMnet_like_Approaches","translated_slug":"","page_count":2,"language":"en","content_type":"Work","summary":"This paper critically examines the efficacy of the ToMnet family of models in achieving a genuine Machine Theory of Mind (ToM). We introduce ToMnet-N, an open-source evolution of ToMnet+, developed to replicate and extend previous experiments while addressing architectural limitations. Our analysis reveals that ToMnet-N's success in predictive tasks stems primarily from pattern recognition within training data rather than true mental state understanding. These findings question the validity of ToMnet-like approaches in modeling authentic ToM capabilities and suggest that a paradigm shift may be necessary. 
Random-Set Neural Networks
2025 International Conference on Learning Representations (ICLR 2025), 2025
Abstract: Machine learning is increasingly deployed in safety-critical domains where erroneous predictions may lead to potentially catastrophic consequences, highlighting the need for learning systems to be aware of how confident they are in their own predictions: in other words, 'to know when they do not know'. In this paper, we propose a novel Random-Set Neural Network (RS-NN) approach to classification which predicts belief functions (rather than classical probability vectors) over the class list using the mathematics of random sets, i.e., distributions over the collection of sets of classes. RS-NN encodes the 'epistemic' uncertainty induced by training sets that are insufficiently representative or limited in size via the size of the convex set of probability vectors associated with a predicted belief function. Our approach outperforms state-of-the-art Bayesian and Ensemble methods in terms of accuracy, uncertainty estimation and out-of-distribution (OoD) detection on multiple benchmarks (CIFAR-10 vs SVHN/Intel-Image, MNIST vs FMNIST/KMNIST, ImageNet vs ImageNet-O). RS-NN also scales up effectively to large-scale architectures (e.g. WideResNet-28-10, VGG16, Inception V3, EfficientNetB2 and ViT-Base-16), exhibits remarkable robustness to adversarial attacks and can provide statistical guarantees in a conformal learning setting.
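To make the abstract's set-valued predictions concrete, here is a minimal sketch (not the authors' RS-NN code; class names and mass values are hypothetical) of how a mass function over sets of classes yields the per-class lower (belief) and upper (plausibility) probability bounds whose gap encodes epistemic uncertainty:

```python
# Minimal sketch: a predicted mass function over *sets* of classes induces
# per-class probability intervals. Mass values and class names are made up;
# in RS-NN these masses would come from the network's output layer.
classes = ["cat", "dog", "frog"]

mass = {
    frozenset({"cat"}): 0.5,
    frozenset({"dog"}): 0.1,
    frozenset({"cat", "dog"}): 0.3,
    frozenset({"cat", "dog", "frog"}): 0.1,
}

for c in classes:
    bel = sum(m for A, m in mass.items() if A <= {c})  # mass committed to c alone
    pl = sum(m for A, m in mass.items() if c in A)     # mass merely consistent with c
    print(f"P({c}) in [{bel:.2f}, {pl:.2f}], epistemic width {pl - bel:.2f}")
```

A wide interval (here, [0.50, 0.90] for "cat") signals that much of the predicted mass sits on non-singleton sets, which is the sense in which the size of the induced convex set of probability vectors measures epistemic uncertainty.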
Credal wrapper of model averaging for uncertainty estimation in classification
2025 International Conference on Learning Representations (ICLR 2025), 2025
Abstract: This paper presents an innovative approach, called credal wrapper, to formulating a credal set representation of model averaging for Bayesian neural networks (BNNs) and deep ensembles (DEs), capable of improving uncertainty estimation in classification tasks. Given a finite collection of single predictive distributions derived from BNNs or DEs, the proposed credal wrapper approach extracts an upper and a lower probability bound per class, acknowledging the epistemic uncertainty due to the availability of a limited amount of distributions. Such probability intervals over classes can be mapped to a convex set of probabilities (a credal set) from which, in turn, a unique prediction can be obtained using a transformation called the intersection probability transformation. We conduct extensive experiments on several out-of-distribution (OOD) detection benchmarks, encompassing various dataset pairs (CIFAR10/100 vs SVHN/Tiny-ImageNet, CIFAR10 vs CIFAR10-C, CIFAR100 vs CIFAR100-C and ImageNet vs ImageNet-O) and using different network architectures (such as VGG16, ResNet-18/50, EfficientNet B2, and ViT Base). Compared to the BNN and DE baselines, the proposed credal wrapper method exhibits superior performance in uncertainty estimation and achieves a lower expected calibration error on corrupted data.

Figure 1 (caption recovered from the page): Credal wrapper framework for a three-class (A, B, D) classification task. Given a set of individual probability distributions in the simplex of probability distributions over the classes, probability intervals are derived by extracting the upper and lower probability bounds per class. These intervals induce a credal set on {A, B, D}, from which a single intersection probability is computed. The accompanying text gives an example: receiving three probability values for a 'rainy' condition, e.g. 0.2, 0.1 and 0.7, the uncertainty on its probability is modelled as the interval [0.1, 0.7].
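To illustrate the mechanism the abstract and Figure 1 describe, here is a minimal sketch (hypothetical numbers, not the authors' code) of the interval extraction and of the intersection probability transform in its standard form for probability-interval systems, where every class receives the same fraction of its interval width:

```python
import numpy as np

# Three hypothetical predictive distributions (e.g., ensemble members)
# over three classes; each row sums to 1.
preds = np.array([
    [0.2, 0.7, 0.1],
    [0.1, 0.8, 0.1],
    [0.7, 0.2, 0.1],
])

lower = preds.min(axis=0)  # per-class lower probability bound
upper = preds.max(axis=0)  # per-class upper probability bound

# Intersection probability: give every class the same fraction alpha of its
# interval width, with alpha chosen so the result sums to 1.
alpha = (1.0 - lower.sum()) / (upper.sum() - lower.sum())
p_int = lower + alpha * (upper - lower)

print("intervals:", list(zip(lower, upper)))
print("intersection probability:", p_int, "sums to", p_int.sum())
```

The first class reproduces the 'rainy' example from the Figure 1 text: predictions 0.2, 0.1 and 0.7 yield the interval [0.1, 0.7], and the overall width of the intervals reflects how much the individual distributions disagree.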
Other figures and tables (captions recovered from the page):
- Figure 2: OOD detection using EU as the metric on CIFAR10 vs CIFAR10-C for the classical and credal-wrapper versions of BNNs and DE, and EDD, against increasing corruption intensity (VGG16 and ResNet-18 backbones).
- Figure 3: ECE values of BNNR, BNNF and DE on CIFAR10-C against increasing corruption intensity, using the averaged probability and the proposed intersection probability (VGG16 and ResNet-18 backbones; 15 runs).
- Figure 4: OOD detection performance of the classical and credal-wrapper versions of DEs using EU on CIFAR10/100 vs CIFAR10-C/100-C (ResNet-50, EffB2 and ViT-B backbones).
- Figure 5: ECE values of DEs on CIFAR10-C and CIFAR100-C, averaged vs intersection probability (ResNet-50, EffB2 and ViT-B backbones; 15 runs).
- Figure 6: ECE values of DEs with various numbers of ensemble members (N = 3, 5, 10, 15, 20, 25) on CIFAR10-C against increasing corruption intensity.
- Figures 7-8: OOD detection using TU as the metric on CIFAR10 vs CIFAR10-C (VGG16, ResNet-18) and on CIFAR10/100 vs CIFAR10-C/100-C for DEs (ResNet-50, EffB2, ViT-B).
- Figures 9-10: EU and TU estimates of ID (CIFAR10/CIFAR100) and OOD (SVHN, Tiny-ImageNet) samples for the classical and credal-wrapper versions of DEs (15 runs).
- Figures 11-12: NLL values on CIFAR10-C and CIFAR100-C, averaged vs intersection probability; the consistently smaller NLL values show the superiority of the intersection probability on corrupted data.
- Figure 13: Different credal set generation methods (left: the credal wrapper; right: the convex hull).
- Table 1: Performance comparison between the classical and credal-wrapper versions of BNN and DE, and EDD models (VGG16/ResNet-18 backbones; CIFAR10 as ID; 15 runs).
- Table 2: OOD detection AUROC and AUPRC (%) of DEs using EU (ResNet-50; 15 runs).
- Tables 4, 6, 7: further OOD detection comparisons using EU, including VGG16-based BNNs.
- Tables 8-10: OOD detection AUROC and AUPRC (%) using TU as the uncertainty metric (VGG16/ResNet-18, ResNet-50, EffB2 and ViT-B backbones).
- Tables 11-12: ablation studies on the number of predictive samples in DEs and BNNs (CIFAR10 vs SVHN/Tiny-ImageNet).
- Table 13: OOD detection using EU and TU in overconfident scenarios (ResNet-50; 15 runs).
- Table 14: OOD detection performance and time cost of the credal wrapper of DEs under different settings of J in the PIA algorithm.
- Table 15: Poor ID prediction and OOD detection performance of EDD-Fair (CIFAR10 as ID).

Recovered text (from the figure captions): The convex hull is an alternative credal set generation method whose theoretical underpinning is compliance with the coherence principle (Walley, 1991); compared to it, the probability-interval system used here is more conservative, and the convex hull method is too computationally complex to be practical in multi-class classification. In the limit, given three distinct extreme probability vectors in a three-class problem, the credal wrapper conveys complete uncertainty, with the resulting credal set covering the entire simplex. Training a ResNet-50-based BNN on CIFAR-10 (resized to 224x224x3) failed in the authors' experiments by exceeding the memory of a single Nvidia A100 GPU; the PIA algorithm (Algorithm 1) is applied with J = 20 and J = 50 to compute the lower and upper generalized entropies on dataset pairs involving CIFAR100 and ImageNet, respectively. OOD detection is treated as a binary classification: ID and OOD samples are labelled 0 and 1, and the model's uncertainty estimate (EU or TU) for each sample serves as the prediction score; AUROC quantifies the rates of true and false positives, while AUPRC evaluates precision-recall trade-offs across confidence levels (see the sketch below).
Networks","url":"https://www.academia.edu/Documents/in/Artificial_Neural_Networks"},{"id":81182,"name":"Deep Learning","url":"https://www.academia.edu/Documents/in/Deep_Learning"},{"id":177212,"name":"Interval analysis","url":"https://www.academia.edu/Documents/in/Interval_analysis"},{"id":1587858,"name":"Confidence Interval","url":"https://www.academia.edu/Documents/in/Confidence_Interval"}],"urls":[]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") if (true) { Aedu.setUpFigureCarousel('profile-work-127272202-figures'); } }); </script> <div class="js-work-strip profile--work_container" data-work-id="127272113"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/127272113/A_Unified_Evaluation_Framework_for_Epistemic_Predictions_Anonymous_Author_Anonymous_Institution"><img alt="Research paper thumbnail of A Unified Evaluation Framework for Epistemic Predictions Anonymous Author Anonymous Institution" class="work-thumbnail" src="https://attachments.academia-assets.com/121028812/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/127272113/A_Unified_Evaluation_Framework_for_Epistemic_Predictions_Anonymous_Author_Anonymous_Institution">A Unified Evaluation Framework for Epistemic Predictions Anonymous Author Anonymous Institution</a></div><div class="wp-workCard_item"><span>Artificial Intelligence and Statistics (AISTATS 2025)</span><span>, 2025</span></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">Predictions of uncertainty-aware models are diverse, ranging from single point estimates (often a...</span><a class="js-work-more-abstract" data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">Predictions of uncertainty-aware models are diverse, ranging from single point estimates (often averaged over prediction samples) to predictive distributions, to set-valued or credal-set representations. We propose a novel unified evaluation framework for uncertaintyaware classifiers, applicable to a wide range of model classes, which allows users to tailor the trade-off between accuracy and precision of predictions via a suitably designed performance metric. This makes possible the selection of the most suitable model for a particular real-world application as a function of the desired trade-off. 
Our experiments, concerning Bayesian, ensemble, evidential, deterministic, credal and belief function classifiers on the CIFAR-10, MNIST and CIFAR-100 datasets, show that the metric behaves as desired. (A sketch of the evaluation metric, and of the Bayesian Model Averaging step described under Figure 3, follows the figure list below.)

Figures and tables:
Table 9: Model Selection Based on Evaluation Metric using KL Divergence and Non-Specificity on the CIFAR-100 dataset.
Figure 1: Different types of uncertainty-aware model predictions, shown in a unit simplex of probability distributions defined on the list of classes Y = {a, b, c}. Our proposed evaluation framework uses a metric which combines, for each input x, a distance (arrows) between the corresponding ground truth (e.g., (0, 1, 0)) and the epistemic predictions generated by the various models (in the form of credal sets), and a measure of the extent of the credal prediction (non-specificity).
Figure 2: Measures of KL divergence (top left), Non-specificity (top right) and Evaluation Metric (bottom left) for both Correctly (CC) and Incorrectly Classified (ICC) samples from CIFAR-10, and Evaluation Metric vs trade-off parameter (bottom right), for all models, on the CIFAR-10 dataset.
Figure 3: Visualizations of 100 prediction samples obtained prior to Bayesian Model Averaging, and the corresponding Bayesian Model Averaged prediction, in two real scenarios from CIFAR-10. Here θ is the set of sampled weights, Φ_θi(x) is the prediction made by the model with weights θi for input x, and Φ is the model function. This process is called Bayesian Model Averaging (BMA). Fig. 3 illustrates two contrasting scenarios in which BMA proves advantageous in the first case (top), yet exhibits limitations as it discards all information in the second case (bottom). This can limit a BNN's ability to accurately represent complex uncertainty patterns, potentially undermining its effectiveness in scenarios requiring reliable uncertainty quantification. BMA may inadvertently smooth out predictive distributions, diluting the inherent uncertainty present in individual models (Hinne et al., 2020; Graefe et al., 2015), as shown in Fig. 3. When applied to classification, BMA yields point-wise predictions. For fair comparison, and to overcome BMA's limitations, in this paper we also use sets of prediction samples obtained from the different posterior weights before averaging.
Figure 4: Visualizations of belief and mass predictions on the power-set space and their mapping to the label space Y using pignistic probabilities, on the CIFAR-10 dataset.
Figure 5: Probability simplices illustrating the convex closure of predictions and credal sets for the Bayesian model (LB-BNN) across three classes of the CIFAR-10 dataset.
Figure 6: Comparison of (a) Kullback-Leibler (KL) divergence and (b) Jensen-Shannon (JS) divergence for Correctly Classified (CC) and Incorrectly Classified (ICC) samples from the CIFAR-10 dataset, for all models considered here. Notably, the scales of these two measures differ significantly (Y-axis).
Figure 7: Comparison of mean Evaluation Metric ε using mean Kullback-Leibler (KL) divergence (top) and mean Jensen-Shannon (JS) divergence (bottom) for Correct (left) and Incorrect (right) predictions of the CIFAR-10 dataset.
Figure 8: Scatter plots showing the relationship between uncertainty (KL and JS divergences) and non-specificity for correct and incorrect predictions across models.
Figure 9: Comparison of (a) Non-Specificity (NS) and (b) Credal Uncertainty (CU) for Correctly Classified (CC) and Incorrectly Classified (ICC) samples from the CIFAR-10 dataset, for all models considered here. Notably, the scales of these two measures differ (Y-axis). These optimization problems can be addressed using standard solvers, such as the SciPy optimization package (Virtanen et al., 2020).
" class="figure-slide-image" src="https://figures.academia-assets.com/121028812/figure_009.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/7548461/figure-10-comparison-of-mean-evaluation-metric-using-mean"><img alt="Figure 10: Comparison of mean Evaluation Metric € using mean Non-Specificity (top) and mean Credal Uncertainty (bottom) for Correct (left) and Incorrect (right) predictions of the CIFAR-10 dataset. " class="figure-slide-image" src="https://figures.academia-assets.com/121028812/figure_010.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/7548465/figure-11-unified-evaluation-framework-for-epistemic"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/121028812/figure_011.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/7548470/figure-12-unified-evaluation-framework-for-epistemic"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/121028812/figure_012.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/7548477/figure-13-ablation-study-on-the-number-of-prediction-samples"><img alt="Figure 13: Ablation study on the number of prediction samples of LB-BNN and the number of ensembles of DE with Evaluation Metric (€). increases, with a more pronounced effect seen in DE compared to LB-BNN. Results are shown for LB-BNN wit] 50 to 500 samples and DE with 5 to 30 ensembles. The increase in the number of samples leads to a correspondin; increase in the size of the credal set, without apparently being compensated by lower KL divergence values. " class="figure-slide-image" src="https://figures.academia-assets.com/121028812/figure_013.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/7548481/figure-14-credal-set-sizes-for-all-models-for-prediction"><img alt="Figure 14: Credal set sizes for all models for 50 prediction samples of the CIFAR-10 dataset. Larger credal set sizes indicate a more imprecise prediction. " class="figure-slide-image" src="https://figures.academia-assets.com/121028812/figure_014.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/7548490/figure-15-credal-set-sizes-for-all-models-for-prediction"><img alt="Figure 15: Credal set sizes for all models for 50 prediction samples of the MNIST dataset. Larger credal set sizes indicate a more imprecise prediction " class="figure-slide-image" src="https://figures.academia-assets.com/121028812/figure_015.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/7548497/figure-16-credal-set-sizes-for-all-models-for-prediction"><img alt="Figure 16: Credal set sizes for all models for 50 prediction samples of the CIFAR-100 dataset. Larger credal set sizes indicate a more imprecise prediction " class="figure-slide-image" src="https://figures.academia-assets.com/121028812/figure_016.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/7548554/figure-17-credal-set-size-vs-non-specificity-heatmap-for-lb"><img alt="Figure 17: Credal Set Size vs. Non-Specificity heatmap for (a) LB-BNN, (b) Deep Ensembles (DE), (c) EDL, (d) CreINN, (e) E-CNN and (f) RS-NN on the CIFAR-10 dataset. Credal Set Size and Non-Specificity are directly correlated to each other. 
Log frequency is used to better showcase the trend. " class="figure-slide-image" src="https://figures.academia-assets.com/121028812/figure_017.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/7548570/figure-18-credal-set-size-vs-non-specificity-heatmap-for-lb"><img alt="Figure 18: Credal Set Size vs. Non-Specificity heatmap for (a) LB-BNN, (b) Deep Ensembles (DE), (c) EDL, (d) CreINN, (e) E-CNN and (f) RS-NN on the MNIST dataset. Credal Set Size and Non-Specificity are directly correlated to each other. Log frequency is used to better showcase the trend. " class="figure-slide-image" src="https://figures.academia-assets.com/121028812/figure_018.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/7548591/figure-19-credal-set-size-vs-non-specificity-heatmap-for-lb"><img alt="Figure 19: Credal Set Size vs. Non-Specificity heatmap for (a) LB-BNN, (b) Deep Ensembles (DE), (c) EDL, (d) CreINN and (e) RS-NN on the CIFAR-100 dataset. Credal Set Size and Non-Specificity are directly correlated to each other. Log frequency is used to better showcase the trend. " class="figure-slide-image" src="https://figures.academia-assets.com/121028812/figure_019.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/7548613/table-1-comparison-of-kullback-leibler-divergence-kl-non"><img alt="Table 1: Comparison of Kullback-Leibler divergence (KL), Non-Specificity (NS) and Evaluation Metric (€) for uncertainty-aware classifiers (trade-off \ = 1). Mean and std are shown for CIFAR-10, MNIST and CIFAR-100. " class="figure-slide-image" src="https://figures.academia-assets.com/121028812/table_001.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/7548627/table-3-comparison-of-kl-non-specificity-evaluation-metric"><img alt="Table 3: Comparison of KL, Non-Specificity, Evaluation Metric (€) calculated using approximated versus naive credal set vertices for LB-BNN and DE on the CIFAR-10 dataset. " class="figure-slide-image" src="https://figures.academia-assets.com/121028812/table_002.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/7548645/table-4-model-rankings-based-on-kl-and-js-divergence-on-the"><img alt="Table 4: Model Rankings Based on KL and JS Divergence on the CIFAR-10 dataset. Model selection is based on the mean of Evaluation Metric (€) with the models with the lowest € ranking first. " class="figure-slide-image" src="https://figures.academia-assets.com/121028812/table_003.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/7548653/table-5-model-rankings-based-on-non-specificity-ns-and"><img alt="Table 5: Model Rankings Based on Non-Specificity (NS) and Credal Uncertainty (CU) on the CIFAR-10 dataset. Model selection is based on the mean of Evaluation Metric (€) with the models with the lowest € ranking first. A The distance metric used here is KL divergence. 
" class="figure-slide-image" src="https://figures.academia-assets.com/121028812/table_004.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/7548658/table-6-comparison-of-kl-divergence-kl-non-specificity-ns"><img alt="Table 6: Comparison of KL divergence (KL), Non-Specificity (NS), and Evaluation Metric (€) (trade-off A = 1) for Correctly Classified (CC) and Incorrectly Classified (ICC) samples for each model on three datasets: CIFAR-10. MNIST, and CIFAR-100. " class="figure-slide-image" src="https://figures.academia-assets.com/121028812/table_005.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/7548665/table-8-model-selection-based-on-evaluation-metric-using-kl"><img alt="Table 8: Model Selection Based on Evaluation Metric using KL Divergence and Non-Specificity on the MNIST dataset. Table 7: Trade-off (A) vs Evaluation Metric (€) for different values of \ for the CIFAR-10 dataset. " class="figure-slide-image" src="https://figures.academia-assets.com/121028812/table_006.jpg" /></a></figure></div><div class="next-slide-container js-next-button-container"><button aria-label="Next" class="carousel-navigation-button js-profile-work-127272113-figures-next"><span class="material-symbols-outlined" style="font-size: 24px" translate="no">arrow_forward_ios</span></button></div></div></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="afd88cd36c4999237bf730ba5e1ee813" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":121028812,"asset_id":127272113,"asset_type":"Work","button_location":"profile"}" href="https://www.academia.edu/attachments/121028812/download_file?s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="127272113"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="127272113"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 127272113; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=127272113]").text(description); $(".js-view-count[data-work-id=127272113]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 127272113; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='127272113']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 
})(["https://a.academia-assets.com/assets/wow_profile-a9bf3a2bc8c89fa2a77156577594264ee8a0f214d74241bc0fcd3f69f8d107ac.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (true){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "afd88cd36c4999237bf730ba5e1ee813" } } $('.js-work-strip[data-work-id=127272113]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":127272113,"title":"A Unified Evaluation Framework for Epistemic Predictions Anonymous Author Anonymous Institution","translated_title":"","metadata":{"abstract":"Predictions of uncertainty-aware models are diverse, ranging from single point estimates (often averaged over prediction samples) to predictive distributions, to set-valued or credal-set representations. We propose a novel unified evaluation framework for uncertaintyaware classifiers, applicable to a wide range of model classes, which allows users to tailor the trade-off between accuracy and precision of predictions via a suitably designed performance metric. This makes possible the selection of the most suitable model for a particular real-world application as a function of the desired trade-off. Our experiments, concerning Bayesian, ensemble, evidential, deterministic, credal and belief function classifiers on the CIFAR-10, MNIST and CIFAR-100 datasets, show that the metric behaves as desired.","publication_date":{"day":null,"month":null,"year":2025,"errors":{}},"publication_name":"Artificial Intelligence and Statistics (AISTATS 2025)"},"translated_abstract":"Predictions of uncertainty-aware models are diverse, ranging from single point estimates (often averaged over prediction samples) to predictive distributions, to set-valued or credal-set representations. We propose a novel unified evaluation framework for uncertaintyaware classifiers, applicable to a wide range of model classes, which allows users to tailor the trade-off between accuracy and precision of predictions via a suitably designed performance metric. This makes possible the selection of the most suitable model for a particular real-world application as a function of the desired trade-off. 
Our experiments, concerning Bayesian, ensemble, evidential, deterministic, credal and belief function classifiers on the CIFAR-10, MNIST and CIFAR-100 datasets, show that the metric behaves as desired.","internal_url":"https://www.academia.edu/127272113/A_Unified_Evaluation_Framework_for_Epistemic_Predictions_Anonymous_Author_Anonymous_Institution","translated_internal_url":"","created_at":"2025-01-26T07:28:24.618-08:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":366407,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[{"id":43025003,"work_id":127272113,"tagging_user_id":366407,"tagged_user_id":null,"co_author_invite_id":7604285,"email":"1***5@brookes.ac.uk","display_order":1,"name":"Shireen Kudukkil Manchingal","title":"A Unified Evaluation Framework for Epistemic Predictions Anonymous Author Anonymous Institution"},{"id":43025004,"work_id":127272113,"tagging_user_id":366407,"tagged_user_id":null,"co_author_invite_id":8053052,"email":"k***g@kuleuven.be","display_order":2,"name":"Kaizheng Wang","title":"A Unified Evaluation Framework for Epistemic Predictions Anonymous Author Anonymous Institution"}],"downloadable_attachments":[{"id":121028812,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/121028812/thumbnails/1.jpg","file_name":"AISTATS25_Submitted_version.pdf","download_url":"https://www.academia.edu/attachments/121028812/download_file","bulk_download_file_name":"A_Unified_Evaluation_Framework_for_Epist.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/121028812/AISTATS25_Submitted_version.pdf?1737905300=\u0026response-content-disposition=attachment%3B+filename%3DA_Unified_Evaluation_Framework_for_Epist.pdf\u0026Expires=1743344601\u0026Signature=Pb~ZBWxfydOat8BeJTxXuT2GOlETd7Gx7RPonnQgRu14VNGHFuR9r-aKKFTAaphHvl1dqfRu-vBzQtD911200jVyaeZBJZR48mBtTlC0dm8HDJX928XoFidK0smjLn-ibVbGVYA78lQNDKHabHBgikCi5SO8ZKk3M~MM5N1Tho7IUIYOzFSWOo2xUG~mXcOzWRl4RgadOv1GKyDPXBJRKpH7Pvl7CsTTrmiyRpQpi16YsZkOzypR5Dch7r0AV0xGrTZlv4rUtjobq3cnv67wzIeYEybhU0ky1FHgRQyc1PnBE5qbbB3iIbfvgy2AzuvSEXdLZLa7zpFmuxd6arX71w__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"slug":"A_Unified_Evaluation_Framework_for_Epistemic_Predictions_Anonymous_Author_Anonymous_Institution","translated_slug":"","page_count":33,"language":"en","content_type":"Work","summary":"Predictions of uncertainty-aware models are diverse, ranging from single point estimates (often averaged over prediction samples) to predictive distributions, to set-valued or credal-set representations. We propose a novel unified evaluation framework for uncertaintyaware classifiers, applicable to a wide range of model classes, which allows users to tailor the trade-off between accuracy and precision of predictions via a suitably designed performance metric. This makes possible the selection of the most suitable model for a particular real-world application as a function of the desired trade-off. 
Our experiments, concerning Bayesian, ensemble, evidential, deterministic, credal and belief function classifiers on the CIFAR-10, MNIST and CIFAR-100 datasets, show that the metric behaves as desired.","owner":{"id":366407,"first_name":"Fabio","middle_initials":null,"last_name":"Cuzzolin","page_name":"FabioCuzzolin","domain_name":"oxfordbrookes","created_at":"2011-03-17T00:43:31.195-07:00","display_name":"Fabio Cuzzolin","url":"https://oxfordbrookes.academia.edu/FabioCuzzolin"},"attachments":[{"id":121028812,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/121028812/thumbnails/1.jpg","file_name":"AISTATS25_Submitted_version.pdf","download_url":"https://www.academia.edu/attachments/121028812/download_file","bulk_download_file_name":"A_Unified_Evaluation_Framework_for_Epist.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/121028812/AISTATS25_Submitted_version.pdf?1737905300=\u0026response-content-disposition=attachment%3B+filename%3DA_Unified_Evaluation_Framework_for_Epist.pdf\u0026Expires=1743344601\u0026Signature=Pb~ZBWxfydOat8BeJTxXuT2GOlETd7Gx7RPonnQgRu14VNGHFuR9r-aKKFTAaphHvl1dqfRu-vBzQtD911200jVyaeZBJZR48mBtTlC0dm8HDJX928XoFidK0smjLn-ibVbGVYA78lQNDKHabHBgikCi5SO8ZKk3M~MM5N1Tho7IUIYOzFSWOo2xUG~mXcOzWRl4RgadOv1GKyDPXBJRKpH7Pvl7CsTTrmiyRpQpi16YsZkOzypR5Dch7r0AV0xGrTZlv4rUtjobq3cnv67wzIeYEybhU0ky1FHgRQyc1PnBE5qbbB3iIbfvgy2AzuvSEXdLZLa7zpFmuxd6arX71w__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"research_interests":[{"id":465,"name":"Artificial Intelligence","url":"https://www.academia.edu/Documents/in/Artificial_Intelligence"},{"id":2008,"name":"Machine Learning","url":"https://www.academia.edu/Documents/in/Machine_Learning"},{"id":6404,"name":"Reasoning about Uncertainty","url":"https://www.academia.edu/Documents/in/Reasoning_about_Uncertainty"},{"id":11598,"name":"Neural Networks","url":"https://www.academia.edu/Documents/in/Neural_Networks"},{"id":16097,"name":"Decision Making Under Uncertainty","url":"https://www.academia.edu/Documents/in/Decision_Making_Under_Uncertainty"},{"id":28512,"name":"Bayesian Networks","url":"https://www.academia.edu/Documents/in/Bayesian_Networks"},{"id":31477,"name":"Uncertainty Quantification","url":"https://www.academia.edu/Documents/in/Uncertainty_Quantification"},{"id":41239,"name":"Bayesian statistics \u0026 modelling","url":"https://www.academia.edu/Documents/in/Bayesian_statistics_and_modelling"},{"id":54123,"name":"Artificial Neural Networks","url":"https://www.academia.edu/Documents/in/Artificial_Neural_Networks"},{"id":61603,"name":"Uncertainty","url":"https://www.academia.edu/Documents/in/Uncertainty"},{"id":81182,"name":"Deep Learning","url":"https://www.academia.edu/Documents/in/Deep_Learning"}],"urls":[]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") if (true) { Aedu.setUpFigureCarousel('profile-work-127272113-figures'); } }); </script> <div class="js-work-strip profile--work_container" data-work-id="124827501"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/124827501/Neuroscience_for_AI_The_importance_of_Theory_of_Mind"><img alt="Research paper thumbnail of Neuroscience for AI: The importance of Theory of Mind" class="work-thumbnail" src="https://attachments.academia-assets.com/118981507/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div 
class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/124827501/Neuroscience_for_AI_The_importance_of_Theory_of_Mind">Neuroscience for AI: The importance of Theory of Mind</a></div><div class="wp-workCard_item"><span>Developments in Neuroethics and Bioethics</span><span>, 2024</span></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">Understanding Theory of Mind is challenging as it can be viewed as a complex holistic process tha...</span><a class="js-work-more-abstract" data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">Understanding Theory of Mind is challenging as it can be viewed as a complex holistic process that can be decomposed into a number of hot and cold cognitive processes. Cold cognitive processes are non-emotional, whereas hot cognition is both social and emotional. Cold cognition includes working memory, cognitive flexibility and 'if-then' inferential logic and planning, processes which are used in non-social contexts, but which are often components of Theory of Mind tests. In social situations, we use our social cognition to process, remember and use information to explain and predict other people's behaviour, as well as our own. Therefore, strategic behaviour for goal achievement involving other people often relies on an interaction between hot and cold cognition. Similarly, for goal achievement in artificial intelligence (AI), for example robust performance in autonomous cars, or therapeutic interactions with humans, it is important to not only have the cold cognitive processes, which are well established in AI, but also the hot cognitive processes that require further development. This chapter will address hot cognitive processes, their underlying neural networks and how this information might be integrated in AI models to more successfully mimic the human brain and to enhance AI-human interactions. 
Finally, the chapter discusses the importance of an integrated and interdisciplinary approach to AI models, and the ethical issues increasingly arising in AI.
Available at: https://www.sciencedirect.com/science/article/pii/S2589295924000195
Feature boosting with efficient attention for scene parsing
By Fabio Cuzzolin and Vivek Singh, with Shailza Sharma
Neurocomputing, vol. 601, article 128222, Elsevier, 2024. doi: 10.1016/j.neucom.2024.128222
Available at: https://www.sciencedirect.com/science/article/pii/S0925231224009937
The complexity of scene parsing grows with the number of object and scene classes, which is higher in unrestricted open scenes. The biggest challenge is to model the spatial relations between scene elements while still identifying objects at smaller scales. This paper presents a novel feature-boosting network that gathers spatial context from multiple levels of feature extraction and computes the attention weights for each level of representation to generate the final class labels. A novel 'channel attention module' is designed to compute the attention weights, ensuring that features from the relevant extraction stages are boosted while the others are attenuated (a minimal sketch of this idea follows the figure list below). The model also learns spatial context information at low resolution to preserve the abstract spatial relationships among scene elements and reduce computation cost. Spatial attention is subsequently concatenated into a final feature set before applying feature boosting. Low-resolution spatial attention features are trained using an auxiliary task that helps learn a coarse global scene structure. The proposed model outperforms all state-of-the-art models on both the ADE20K and the Cityscapes datasets.

Figures and tables:
Figure 1: Sample images from the ADE20K dataset (Zhou et al., 2017), reflecting the complexity of unrestricted natural scenes.
Figure 2: Complete architecture of the proposed Feature Boosting Network (FBNet).
" class="figure-slide-image" src="https://figures.academia-assets.com/118981272/figure_002.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/40659813/figure-3-the-proposed-channel-attention-module-cam-used-in"><img alt="Figure 3: The proposed Channel Attention Module (CAM) used in FBNet. sification block which uses softmax to compute the class probabilities of each pixel. " class="figure-slide-image" src="https://figures.academia-assets.com/118981272/figure_003.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/40659820/figure-4-plot-of-miou-achieved-versus-number-of-parameters"><img alt="Figure 4: Plot of (a) mIOU achieved versus number of parameters for different backbones in Table 2; (b mloU value against number of parameters for all the models in Table 4. " class="figure-slide-image" src="https://figures.academia-assets.com/118981272/figure_004.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/40659831/figure-5-attention-maps-for-the-fbnet-trained-on-adek"><img alt="Figure 5: Attention maps for the FBNet trained on ADE20K dataset. First and second rows show attention maps for SAM (size: 64x86) and CAM (size: 128x171), respectively. Figure 5: Attention maps for the FBNet trained on ADE20K dataset. First and second rows show attention " class="figure-slide-image" src="https://figures.academia-assets.com/118981272/figure_005.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/40659840/figure-6-prediction-results-from-the-proposed-fbnet-against"><img alt="Figure 6: Prediction results from the proposed FBNet against the ground truth labels for a number of sample images from validation set of ADE20K dataset Zhou et al. (2017). The first row shows the original image, followed by the output of FBNet. The last row shows the ground truth label image. Figure 6: Prediction results from the proposed FBNet against the ground truth labels for a number of sample " class="figure-slide-image" src="https://figures.academia-assets.com/118981272/figure_006.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/40659848/table-1-ablation-study-evaluating-the-performance-of-fbnet"><img alt="Table 1: Ablation study evaluating the performance of FBNet under different combinations of its structural components. Table 1: Ablation study evaluating the performance of FBNet under different combinations of its structural " class="figure-slide-image" src="https://figures.academia-assets.com/118981272/table_001.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/40659857/table-2-ablation-study-showing-the-performance-of-the"><img alt="Table 2: Ablation study showing the performance of the proposed model with different state-of-the-art back- bones. " class="figure-slide-image" src="https://figures.academia-assets.com/118981272/table_002.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/40659865/table-3-comparison-of-performance-against-state-of-the-art"><img alt="Table 3: Comparison of performance against state-of-the-art segmentation models on the ADE20k dataset. 
" class="figure-slide-image" src="https://figures.academia-assets.com/118981272/table_003.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/40659873/table-4-comparison-of-performance-against-state-of-the-art"><img alt="Table 4: Comparison of performance against state-of-the-art segmentation models on the Cityscapes dataset. " class="figure-slide-image" src="https://figures.academia-assets.com/118981272/table_004.jpg" /></a></figure></div><div class="next-slide-container js-next-button-container"><button aria-label="Next" class="carousel-navigation-button js-profile-work-124827200-figures-next"><span class="material-symbols-outlined" style="font-size: 24px" translate="no">arrow_forward_ios</span></button></div></div></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="afb6c186cadd6dc2693a5b006ce3ab76" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":118981272,"asset_id":124827200,"asset_type":"Work","button_location":"profile"}" href="https://www.academia.edu/attachments/118981272/download_file?s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="124827200"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="124827200"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 124827200; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=124827200]").text(description); $(".js-view-count[data-work-id=124827200]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 124827200; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='124827200']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-a9bf3a2bc8c89fa2a77156577594264ee8a0f214d74241bc0fcd3f69f8d107ac.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (true){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "afb6c186cadd6dc2693a5b006ce3ab76" } } $('.js-work-strip[data-work-id=124827200]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":124827200,"title":"Feature boosting with 
The proposed model outperforms all state-ofthe-art models on both the ADE20K and the Cityscapes datasets.","owner":{"id":366407,"first_name":"Fabio","middle_initials":null,"last_name":"Cuzzolin","page_name":"FabioCuzzolin","domain_name":"oxfordbrookes","created_at":"2011-03-17T00:43:31.195-07:00","display_name":"Fabio Cuzzolin","url":"https://oxfordbrookes.academia.edu/FabioCuzzolin"},"attachments":[{"id":118981272,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://a.academia-assets.com/images/blank-paper.jpg","file_name":"2402.19250v1.pdf","download_url":"https://www.academia.edu/attachments/118981272/download_file","bulk_download_file_name":"Feature_boosting_with_efficient_attentio.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/118981272/2402.19250v1-libre.pdf?1729240630=\u0026response-content-disposition=attachment%3B+filename%3DFeature_boosting_with_efficient_attentio.pdf\u0026Expires=1743344601\u0026Signature=Fz94PwvkAzHav614XxMlkr8mTQ8yMg1T6N0ic95g5id94PSAOiYqcXydksjpBsFlE6ysrcAvwdunhwmWTmzRv-gPD5S-I84HMmZ3fbeMKHOYg33MpWVzjyhthN2yJwy2LfI0RugnSQhK~tZZ1uD-~~~DXxk0vNHdHmrh9CKgRbIIikF1hsJl8lEi2TPlUvtwQqD3BPcTr4363k4KaLhqs~6jZ9PQLM81ZJ8i8Csl1ZQebmotAD3V5MjBjxeOL~DiBOWUSONKTfW5EbYw0bxioY~1uTSsB4l8~HhRs8Vd7n8ApPhlHJmK5qk8sk60NusW9-vzzudcIXQFONYFJ1fF~g__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"research_interests":[{"id":422,"name":"Computer Science","url":"https://www.academia.edu/Documents/in/Computer_Science"},{"id":854,"name":"Computer Vision","url":"https://www.academia.edu/Documents/in/Computer_Vision"},{"id":2008,"name":"Machine Learning","url":"https://www.academia.edu/Documents/in/Machine_Learning"},{"id":26870,"name":"Image segmentation","url":"https://www.academia.edu/Documents/in/Image_segmentation"},{"id":81182,"name":"Deep Learning","url":"https://www.academia.edu/Documents/in/Deep_Learning"}],"urls":[{"id":45208236,"url":"https://www.sciencedirect.com/science/article/pii/S0925231224009937"}]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") if (true) { Aedu.setUpFigureCarousel('profile-work-124827200-figures'); } }); </script> <div class="js-work-strip profile--work_container" data-work-id="124810485"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/124810485/Uncertainty_measures_A_critical_survey"><img alt="Research paper thumbnail of Uncertainty measures: A critical survey" class="work-thumbnail" src="https://attachments.academia-assets.com/118981426/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/124810485/Uncertainty_measures_A_critical_survey">Uncertainty measures: A critical survey</a></div><div class="wp-workCard_item"><span>Information Fusion</span><span>, 2024</span></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">Classical probability is not the only mathematical theory of uncertainty, or the most general. 
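A minimal PyTorch-style sketch of the kind of channel-attention gating the abstract describes (my illustration, with assumed module names and feature shapes, not the paper's implementation):

```python
# Sketch (not the paper's code) of a channel-attention gate that re-weights
# feature maps from several extraction stages before they are fused.
import torch
import torch.nn as nn

class ChannelAttention(nn.Module):
    """Squeeze-and-excitation-style gate: global pooling -> MLP -> sigmoid."""
    def __init__(self, channels: int, reduction: int = 16):
        super().__init__()
        self.pool = nn.AdaptiveAvgPool2d(1)  # squeeze spatial dimensions
        self.mlp = nn.Sequential(
            nn.Linear(channels, channels // reduction),
            nn.ReLU(inplace=True),
            nn.Linear(channels // reduction, channels),
            nn.Sigmoid(),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        b, c, _, _ = x.shape
        w = self.mlp(self.pool(x).view(b, c)).view(b, c, 1, 1)
        return x * w  # boost relevant channels, attenuate the others

# Feature boosting: gate each stage's (already resized) features, then fuse.
stages = [torch.randn(2, 64, 32, 32) for _ in range(3)]  # toy multi-level features
gates = [ChannelAttention(64) for _ in stages]
fused = torch.stack([g(f) for g, f in zip(gates, stages)]).sum(dim=0)
```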
Uncertainty measures: A critical survey
Information Fusion, vol. 114, art. 102609, 2024. DOI: 10.1016/j.inffus.2024.102609
Abstract: Classical probability is neither the only mathematical theory of uncertainty nor the most general. Many authors have argued that probability theory is ill-equipped to model the 'epistemic', reducible uncertainty about the process generating the data. To address this, many alternative theories of uncertainty have been formulated. In this paper, we highlight how uncertainty theories can be seen as forming clusters characterised by a shared rationale, how they are connected to each other in intricate but interesting ways, and how they can be ranked according to their degree of generality. Our objective is to propose a structured, critical summary of the research landscape in uncertainty theory, and to discuss its potential for wider adoption in artificial intelligence.
Publisher link: https://www.sciencedirect.com/science/article/pii/S1566253524003877
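As a concrete illustration of the epistemic/aleatory distinction the survey builds on (a toy example of mine, not taken from the paper), compare a known-fair coin with total ignorance under a belief-function model, one of the more general uncertainty theories the survey covers:

```python
# Why a single probability struggles with 'epistemic' ignorance: under total
# ignorance about a coin, assigning P(heads) = 0.5 is indistinguishable from
# *knowing* the coin is fair. A belief function can instead commit all mass
# to the whole frame (the 'vacuous' belief function), yielding the interval
# [0, 1] for heads: "I know nothing".
def interval(mass, outcome):
    bel = sum(m for A, m in mass.items() if A <= {outcome})  # lower bound
    pl = sum(m for A, m in mass.items() if outcome in A)     # upper bound
    return bel, pl

frame = frozenset({"heads", "tails"})
known_fair = {frozenset({"heads"}): 0.5, frozenset({"tails"}): 0.5}
ignorance = {frame: 1.0}  # vacuous belief function

print(interval(known_fair, "heads"))  # (0.5, 0.5): sharp, aleatory uncertainty
print(interval(ignorance, "heads"))   # (0.0, 1.0): pure epistemic uncertainty
```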
Credal Learning Theory
2024 Conference on Neural Information Processing Systems (NeurIPS 2024). With Michele Caprio and Eleni Elia.
Abstract: Statistical learning theory is the foundation of machine learning, providing theoretical bounds for the risk of models learned from a (single) training set, assumed to be drawn from an unknown probability distribution. In actual deployment, however, the data distribution may (and often does) vary, causing domain adaptation/generalization issues. In this paper we lay the foundations for a 'credal' theory of learning, using convex sets of probabilities (credal sets) to model the variability in the data-generating distribution. Such credal sets, we argue, may be inferred from a finite sample of training sets. Bounds are derived for the case of finite hypothesis spaces (with and without the realizability assumption), as well as for infinite model spaces, directly generalizing classical results.
Poster: https://neurips.cc/virtual/2024/poster/96268
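For context, the classical finite-hypothesis, realizable sample-complexity bound below is the kind of result the credal bounds generalize (a standard statistical-learning fact, not a statement quoted from the paper):

```latex
% Classical realizable, finite-hypothesis-space PAC bound; the paper's credal
% bounds generalize results of this form to a credal set of data-generating
% distributions rather than a single unknown distribution.
\[
m \;\ge\; \frac{1}{\varepsilon}\left(\ln |\mathcal{H}| + \ln \frac{1}{\delta}\right)
\quad\Longrightarrow\quad
\Pr\big[\, R(\hat h) \le \varepsilon \,\big] \;\ge\; 1 - \delta,
\]
% where $\mathcal{H}$ is the hypothesis space, $\hat h$ an empirical risk
% minimizer consistent with the $m$ training examples, and $R$ the true risk
% under the (unknown) data distribution.
```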
href="https://www.academia.edu/124810326/Credal_Deep_Ensembles_for_Uncertainty_Quantification"><img alt="Research paper thumbnail of Credal Deep Ensembles for Uncertainty Quantification" class="work-thumbnail" src="https://attachments.academia-assets.com/118967032/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/124810326/Credal_Deep_Ensembles_for_Uncertainty_Quantification">Credal Deep Ensembles for Uncertainty Quantification</a></div><div class="wp-workCard_item"><span>2024 Conference on Neural Information Processing Systems (NeurIPS 2024)</span><span>, 2024</span></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">This paper introduces an innovative approach to classification called Credal Deep Ensembles (CreD...</span><a class="js-work-more-abstract" data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">This paper introduces an innovative approach to classification called Credal Deep Ensembles (CreDEs), namely, ensembles of novel Credal-Set Neural Networks (CreNets). CreNets are trained to predict a lower and an upper probability bound for each class, which, in turn, determine a convex set of probabilities (credal set) on the class set. The training employs a loss inspired by distributionally robust optimization which simulates the potential divergence of the test distribution from the training distribution, in such a way that the width of the predicted probability interval reflects the 'epistemic' uncertainty about the future data distribution. Ensembles can be constructed by training multiple CreNets, each associated with a different random seed, and averaging the outputted intervals. Extensive experiments are conducted on various out-of-distributions (OOD) detection benchmarks (CIFAR10/100 vs SVHN/Tiny-ImageNet, CIFAR10 vs CIFAR10-C, ImageNet vs ImageNet-O) and using different network architectures (ResNet50, VGG16, and ViT Base). Compared to Deep Ensemble baselines, CreDEs demonstrate higher test accuracy, lower expected calibration error, and significantly improved epistemic uncertainty estimation.</span></div><div class="wp-workCard_item"><div class="carousel-container carousel-container--sm" id="profile-work-124810326-figures"><div class="prev-slide-container js-prev-button-container"><button aria-label="Previous" class="carousel-navigation-button js-profile-work-124810326-figures-prev"><span class="material-symbols-outlined" style="font-size: 24px" translate="no">arrow_back_ios</span></button></div><div class="slides-container js-slides-container"><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658382/table-7-ood-detection-auroc-and-auprc-performance-between"><img alt="Table 7: OOD detection AUROC and AUPRC performance (%, +) between CreDEs-5 and DEs-5 based on ResNet50 using EU as uncertainty metrics on CIFAR10/100 vs. SVHN/Tiny-ImageNet anc ImageNet vs. ImageNet-O. Results are averaged over 15 runs. Best results are in bold. 
" class="figure-slide-image" src="https://figures.academia-assets.com/118967032/table_006.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658387/table-8-auroc-and-auprc-scores-for-ood-detection-on-cifario"><img alt="Table 8: AUROC and AUPRC scores (%, t) for OOD detection on CIFARIO vs SVHN/Tiny- ImageNet. Results averaged over 15 runs. The Best results are in bold. " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/table_007.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658273/figure-1-extensive-experimental-validation-is-conducted-on"><img alt="Extensive experimental validation is conducted on several OOD detection benchmarks, including CI- FAR10/100 (ID) vs SVHN/Tiny-ImageNet (OOD), CIFAR10 (ID) vs CIFAR10-C (OOD), ImageNet (ID) vs ImageNet-O (OOD), and across different network architectures: ResNet50, VGG16 and Visual Transformer Base (ViT Base). Compared to traditional Deep Ensembles, our CreDEs achieve higher test accuracy and lower expected calibration error (ECE) on ID samples, and significantly improve the quality of EU estimation. Figure 1: Comparison between the proposed Credal Deep Ensembles and traditional Deep Ensem- bles. The former aggregate a collection of credal set predictions from CreNets as the final (credal) prediction, whereas the latter average a set of single probability distributions from standard SNNs as the outcome. E.g., in the probability simplex [16] associated with the target space Y = {A, B, D} (the triangle in the figure), a probability vector (q4, ge, dp) is represented as a single point. For each CreNet, the predicted lower and upper probabilities of each class act as constraints (parallel lines) which determine a credal prediction (in gray). Single credal predictions are aggregated as in Sec. 2.4. " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/figure_001.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658283/figure-2-wou-crenet-final-layer-structure-for-three-sures"><img alt="wou) Figure 2: CreNet final layer structure for three sures classes. " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/figure_002.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658393/table-9-ood-detection-auroc-and-auprc-performance-between"><img alt="Table 9: OOD detection AUROC and AUPRC performance (%, +) between CreDEs-5 and DEs-5 based on ResNet50 using TU as uncertainty metrics on CIFAR10/100 vs. SVHN/Tiny-ImageNet and ImageNet vs. ImageNet-O. Results are averaged over 15 runs. Best results in bold. Table 10: OOD detection AUROC and AUPRC performance (%, t) between CreDEs-5 and DEs-S based on VGG16 and ViT Base using TU as uncertainty metrics on CIFAR10 vs. SVHN/Tiny ImageNet. Results are averaged over 15 runs. Best results in bold. " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/table_008.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658400/table-11-we-compare-test-accuracy-and-ece-for-des-and-credes"><img alt="We compare test accuracy and ECE for DEs*-5 and CreDEs-5 in Table 11, and their OOD detection performance on the CIFAR10/100 (ID) vs SVHN/Tiny-ImageNet (OOD) benchmark in Table 12. 
" class="figure-slide-image" src="https://figures.academia-assets.com/118967032/table_009.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658406/table-11-test-accuracy-and-ece-of-des-and-credes-on-the"><img alt="Table 11: Test accuracy and ECE of DEs*-5 and CreDEs-5 on the CIFAR10 and CIFAR100 datasets Best results in bold. " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/table_010.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658376/table-5-the-full-gh-calculation-process-is-presented-in"><img alt="The full GH(Q) calculation process is presented in Algorithm 3. Although the use of probability intervals simplifies the calculation of GH(Q) in general, a significant challenge arises for large values of C (e.g. C =100) due to the complexity of involving subsets of C’. " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/table_005.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658293/figure-5-maximum-reachable-upper-probability-max-qt-qu-per"><img alt="Figure 5: Maximum reachable upper probability max (qt, ; — qu.) per sample from 15 runs. Qualitative Evaluation Due to the high dimensionality, visualizing or directly computing the size of the credal set becomes challenging as C’ increases. Consequently, we indirectly evaluate whether ou CreDEs consistently generate nearly Dirac credal sets as predictions through the maximum attainable upper bound probability of the prediction. The closer this probability is to 1, the more it approximates a Dirac credal set. Figure 5 shows the results of ResNet50-based CreDEs-5 for the CIFAR10, SVHN and Tiny-ImageNet datasets. It verifies that our method does not consistently generate nearly Dirac credal sets, especially for OOD samples. For CIFAR10, a substantial proportion of (but not all) the credal sets are quasi-Dirac. This observation is reasonable as it is consistent with the high test accuracy of CreDEs and the low ECE reported in Table 1. " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/figure_003.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658301/figure-6-reliability-diagram-of-resnet-based-des-and-eu"><img alt="Figure 6: Reliability diagram of ResNet50-based DEs-5 and Figure 7: EU estimates compari- CreDEs-5 (using i;n4n and imax, fespectively) on CIFAR10. son of ResNet50-based models. Figure 8 compares OOD detection performance in the CIFAR1O vs CIFARIO-C setting against the intensity of corruption, using both AUPRC and AUROC as metrics. The results consistently demonstrate that CreDEs achieve higher test accuracy, lower ECE, and significantly improved epistemic uncertainty estimation, leading to enhanced OOD detection performance. " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/figure_004.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658309/table-3-test-accuracy-and-ece-of-des-and-credes-on-cifar-as"><img alt="Table 3: Test accuracy (%, +) and ECE (|) of DEs-5 and CreDEs-5 on CIFAR10 as ID dataset (left). AUROC and AUPRC scores (%, 1) for OOD detection on CIFAR10 vs SVHN/Tiny-ImageNet (right). Results averaged over 15 runs. The Best results are in bold. 
Figure 8: OOD detection on CIFAR10 vs CIFAR10-C against increased corruption intensity, using VGG16 and ViT Base as backbones. " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/figure_005.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658315/figure-9-ood-detection-on-cifar-vs-cifar-against-increased"><img alt="Figure 9: OOD detection on CIFAR10 vs CIFAR10-C against increased corruption intensity, using ResNet50, VGG16, and ViT Base as backbones. " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/figure_006.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658323/figure-10-average-time-cost-of-gh-black-dotted-line-and-gh"><img alt="Figure 10: Average time cost of GH(Q) (black dotted line) and GH(Q) value per sample across various datasets (blue lines), along with the AUROC/AUPRC scores (green/purple lines) for OOD detection versus increasing values of Ix. " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/figure_007.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658331/figure-11-average-time-cost-average-value-per-sample-and-ood"><img alt="Figure 11: Average (Q) time cost, average H(Q) value per sample, and OOD performance on the OOD detection benchmark (CIFAR100 vs. SVHN/Tiny-ImageNet) for increasing values of K. The reported time cost is measured on a single Intel Xeon Gold 8358 CPU@2.6 GHz, without optimization in the calculation process. We believe a more efficient code implementation could significantly mitigate this. " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/figure_008.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658336/table-15-ood-detection-auroc-and-auprc-performance-between"><img alt="Table 15: OOD detection AUROC and AUPRC performance (%, +) between CreDEs-5 and Bayesian models based on ResNet50 using EU and TU as uncertainty metrics on CIFAR10 vs. SVHN/Tiny- ImageNet. Results are averaged over 15 runs. The best results are in bold. The ‘drop’ denotes the dropout rate applied to MCDropout. " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/figure_009.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658341/figure-13-averaged-training-and-validation-val-accuracy-for"><img alt="Figure 13: Averaged training and validation (Val) accuracy (%) for CreNets and SNNs over 15 runs. The U and L in the labels of CreNets represent accuracies associated with upper and lower probability bounds, namely 2 imax and 2 dmins respectively. " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/figure_010.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658344/figure-14-these-alternative-methods-are-illustrated-in"><img alt="These alternative methods are illustrated in Figure 14. Figure 14: Representation of three ensemble approaches: averaging (a), union (b), and intersection (c). In each subfigure, the ultimate credal set (highlighted in dark red) is formed by aggregating two individual credal sets, each constrained by probability intervals indicated in light green and blue. respectively. 
" class="figure-slide-image" src="https://figures.academia-assets.com/118967032/figure_011.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658346/figure-15-empirical-evaluation-in-this-experiment-we-mainly"><img alt="Empirical evaluation In this experiment, we mainly evaluate the impact of averaging and union ensemble approaches on the EU estimation (GH(Q)) of CreDEs. Utilizing 15 individually trained ResNet50-based CreNets on CIFAR10 dataset, we formulate 15 CreDEs-M by varying the ensemble number M from 2 to 10 through averaging and union ensemble methodologies. Each kind of CreDEs- M is assessed for the averaged GH(Q) concerning samples and the quantity of CreDEs-M, and the averaged standard deviation (STD) of GH(Q) related to samples and the quantity of CreDEs-M. The results are plotted in Figure 15 (b) and (a), respectively. Besides, we also present the AUPRC and AUROC scores of OOD detection using GH(Q) as the uncertainty metric in Figure 15 (c) and (d), accordingly. Figure 15: Impact of averaging (Avg) and union on the EU estimation of CreDEs on OOD detectior benchmark involving CIFAR10 vs. SVHN/Tiny-ImageNet (TinyImage), implemented on ResNet5( architecture. " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/figure_012.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658350/figure-16-concept-of-credal-regressor-one-could-then-train"><img alt="Figure 16: Concept of a credal regressor. One could then train an ensemble of Bayesian regressor networks to predict a credal set with a fixed number of vertices (one network outputting one vertex probability) so that the final predicted credal set is the convex closure of those. Figure 16 illustrates the concept briefly. " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/figure_013.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658352/table-1-eu-quantification-for-ood-detection-it-is-our"><img alt="EU Quantification for OOD Detection It is our hypothesis that OOD data express a higher EU. Hence, we can use a better EU quantification as the means to improve the OOD detec- Table 1: Test accuracy (%, +) and ECE (|) of DEs-5 and CreDEs-5 using CIFAR10, CIFAR100, and ImageNet as ID datasets over 15 runs. The better performance is marked in bold. Table | reports the test accuracy and ECE for DEs-5 and CreDEs-5 on the various datasets, indicating that our CreDEs-5 achieved higher test accuracy and lower ECE on ID samples. Note that employing the imin prediction showed higher ECE on the challenging ImageNet dataset. This is likely because the strategy, selecting the class with the highest lower reachable probability, is a conservative one. Table 1: Test accuracy (%, ¢) and ECE (|) of DEs-5 and CreDEs-5 using CIFAR10, CIFAR100, and 5 i: I a - oe en << . . n -/ : a OT Fee | " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/table_001.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658357/table-2-ood-detection-auroc-and-auprc-performance-between"><img alt="Table 2: OOD detection AUROC and AUPRC performance (%, +) between CreDEs-5 and DEs-5 based on ResNet50 using EU as uncertainty metrics on CIFAR10/100 vs. SVHN/Tiny-ImageNet and ImageNet vs. ImageNet-O. Results are averaged over 15 runs. Best results in bold. 
" class="figure-slide-image" src="https://figures.academia-assets.com/118967032/table_002.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658364/table-4-test-accuracy-and-ood-detection-performance-of"><img alt="Table 4: Test accuracy (%, +) and OOD detection performance (%, +) of CreDEs-5 using various 06. Results are averaged over 15 runs. " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/table_003.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658370/table-5-averaged-eu-estimates-of-credes-using-various-model"><img alt="Table 5: Averaged EU estimates of CreDEs-5 using various 6. Model Inference Complexity Table 6 reports the parameter count and inference cost on one NVIDIA A100-SXM4-40GB GPU for a single SNN and CreNet on ImageNet. CreNets show a marginal increase in complexity due to its minor architectural modifications. More discussions on the inference and training complexity are presented in Appendix §C. " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/table_004.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658414/table-12-ood-detection-performance-comparison-of-des-and"><img alt="Table 12: OOD detection performance comparison of DEs*-5 and CreDEs-5 using the dataset pairs CIFAR10/100 (ID) vs SVHN/Tiny-ImageNet (OOD). The reported results demonstrate that CreDEs-5 outperforms DEs*-5 ensembles by achieving higher test accuracy and lower ECE values. Concerning OOD detection tasks, it can be found that CreDEs in general improve the AUPRC and AUROC scores using either the TU or the EU metric, pretty much across the board. These results suggest that CreDEs provide higher-quality EU and TU estimation. " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/table_011.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658418/table-13-test-accuracy-acc-and-ece-comparison-on-the-cifar"><img alt="Table 13: Test accuracy (ACC) (%) and ECE comparison on the CIFAR10 dataset, using the ResNet50 VGG16, and ViT Base architectures. Table 14: OOD detection performance comparison (%) on CIFAR10 vs SVHN/Tiny-ImageNet, using the ResNet50, VGG16, and ViT Base architectures. " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/table_012.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658425/table-13-credes-des-des"><img alt="CreDEs-5 95.71+0.42 97.73+0.27 DEs-5 94.80+0.43 97.26+0.29 DEs?-5 93.90+0.24 96. 1040.21 189.02+0.10 88.02+0.15| 88.80+0.19 87.21+0.29 88.03+0.20 84.11+0.32 87.05+0.80 93.36+0.42 84.50+0.49 90.78+0.35 84.10+0.22 89.83+0.16 82.14+0.14 80.81+0.16) 79.40+0.1075.91+0.14} 78.11+0.08 72.23+0.16 87.30+1.77 92.24+1.15| 79.8041.75 87.9741.17 82.41+1.56 88.51+0.95 88.17+0.44 86.94+0.60 83.81+0.81 81.67+0.89 83.21+1.02 78.2441.17 " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/table_013.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658430/table-14-in-each-iteration-we-acquire-the-samples-with-the"><img alt="In each iteration, we acquire the 5 samples with the highest reported uncertainty estimates (EU or TU per model). 
After each step, we train models using the Adam optimizer for 20 epochs and select the one with the best accuracy from the validation set. AL process stops when the training set size reaches 150. " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/table_014.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/53658433/table-16-complexity-comparison-between-resnet-based-snns-and"><img alt="Table 16: Complexity comparison between ResNet50-based SNNs and CreNets using CIFAR10/100 datasets. The inference cost per dataset is measured by a single NVIDIA P100 SXM2-16GB GPU for both models. Table 17: Inference cost comparison on CPU between SNNs and CreNets per single CIFAR 10 input of different architectures. " class="figure-slide-image" src="https://figures.academia-assets.com/118967032/table_015.jpg" /></a></figure></div><div class="next-slide-container js-next-button-container"><button aria-label="Next" class="carousel-navigation-button js-profile-work-124810326-figures-next"><span class="material-symbols-outlined" style="font-size: 24px" translate="no">arrow_forward_ios</span></button></div></div></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="a719680407fb714460de77499a74a14f" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":118967032,"asset_id":124810326,"asset_type":"Work","button_location":"profile"}" href="https://www.academia.edu/attachments/118967032/download_file?s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="124810326"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="124810326"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 124810326; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=124810326]").text(description); $(".js-view-count[data-work-id=124810326]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 124810326; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='124810326']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-a9bf3a2bc8c89fa2a77156577594264ee8a0f214d74241bc0fcd3f69f8d107ac.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if 
Credal Deep Ensembles for Uncertainty Quantification
2024 Conference on Neural Information Processing Systems (NeurIPS 2024)
Co-authors: Kaizheng Wang, Keivan Shariatmadar, David Moens, Hans Hallez
Abstract: This paper introduces an innovative approach to classification called Credal Deep Ensembles (CreDEs), namely, ensembles of novel Credal-Set Neural Networks (CreNets). CreNets are trained to predict a lower and an upper probability bound for each class, which, in turn, determine a convex set of probabilities (a credal set) on the class set. The training employs a loss inspired by distributionally robust optimization, which simulates the potential divergence of the test distribution from the training distribution, so that the width of the predicted probability interval reflects the 'epistemic' uncertainty about the future data distribution. Ensembles are constructed by training multiple CreNets, each with a different random seed, and averaging the output intervals. Extensive experiments are conducted on various out-of-distribution (OOD) detection benchmarks (CIFAR10/100 vs SVHN/Tiny-ImageNet, CIFAR10 vs CIFAR10-C, ImageNet vs ImageNet-O) and with different network architectures (ResNet50, VGG16, and ViT Base). Compared to Deep Ensemble baselines, CreDEs demonstrate higher test accuracy, lower expected calibration error, and significantly improved epistemic uncertainty estimation.
URL: https://nips.cc/virtual/2024/poster/95324
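As an illustration of the interval-prediction-and-averaging idea described in the abstract (not code from the paper): the sketch below assumes a PyTorch-style classification head; the name CredalHead, the lower-plus-width parameterisation, and the ensembling helper are all hypothetical, not the paper's actual architecture or loss.

    import torch
    import torch.nn as nn
    import torch.nn.functional as F

    class CredalHead(nn.Module):
        # Predicts a lower probability bound and a non-negative width per
        # class, so that upper = min(lower + width, 1). The lower-plus-width
        # parameterisation is an assumption of this sketch.
        def __init__(self, in_features, num_classes):
            super().__init__()
            self.lower = nn.Linear(in_features, num_classes)
            self.width = nn.Linear(in_features, num_classes)

        def forward(self, z):
            q_low = torch.sigmoid(self.lower(z))
            q_high = (q_low + F.softplus(self.width(z))).clamp(max=1.0)
            return q_low, q_high

    def credal_ensemble(heads, z):
        # Average the members' intervals elementwise, as the abstract
        # describes; the per-class gap q_high - q_low carries the
        # epistemic-uncertainty signal.
        lows, highs = zip(*(h(z) for h in heads))
        return torch.stack(lows).mean(dim=0), torch.stack(highs).mean(dim=0)

On this reading, a wide averaged interval flags inputs whose class probabilities the ensemble cannot pin down, which is what the OOD experiments exploit.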
Random-Set Convolutional Neural Network (RS-CNN) for Epistemic Deep Learning
arXiv:2307.05772, 2023. DOI: 10.48550/arXiv.2307.05772
Co-authors: Shireen Kudukkil Manchingal, Muhammad Mubashar, Kaizheng Wang, Keivan Shariatmadar
Abstract: Machine learning is increasingly deployed in safety-critical domains where robustness against adversarial attacks is crucial and erroneous predictions could lead to potentially catastrophic consequences. This highlights the need for learning systems to be equipped with the means to determine a model's confidence in its prediction and the epistemic uncertainty associated with it, 'to know when a model does not know'. In this paper, we propose a novel Random-Set Convolutional Neural Network (RS-CNN) for classification which predicts belief functions, rather than probability vectors, over the set of classes, using the mathematics of random sets, i.e., distributions over the power set of the sample space. Based on the epistemic deep learning approach, random-set models can represent the 'epistemic' uncertainty induced in machine learning by limited training sets. We estimate epistemic uncertainty by approximating the size of the credal sets associated with the predicted belief functions, and experimentally demonstrate how our approach outperforms competing uncertainty-aware approaches in a classical evaluation setting. The performance of RS-CNN is best demonstrated on OOD samples, where it manages to capture the true prediction while standard CNNs fail.
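For intuition about predicting belief functions over the power set (an illustration, not the paper's code): a belief function assigns normalized mass to subsets of the class set, and each class then receives a lower bound (belief) and an upper bound (plausibility), whose gap reflects epistemic uncertainty. A minimal sketch, with frozenset keys as an assumed representation:

    # A belief function assigns normalized mass to subsets of the class set
    # (mass of the empty set is 0). For each class c:
    #   belief(c)       = mass({c})                          -> lower bound
    #   plausibility(c) = sum of masses of sets containing c -> upper bound
    def singleton_bounds(masses):
        classes = sorted(set().union(*masses))
        bel = {c: masses.get(frozenset({c}), 0.0) for c in classes}
        pl = {c: sum(m for A, m in masses.items() if c in A) for c in classes}
        return bel, pl

    # Example: 0.3 of the mass sits on the whole frame {cat, dog} --
    # ignorance that widens the credal set instead of committing to a class.
    m = {frozenset({"cat"}): 0.6,
         frozenset({"dog"}): 0.1,
         frozenset({"cat", "dog"}): 0.3}
    print(singleton_bounds(m))
    # bel: {'cat': 0.6, 'dog': 0.1}; pl: {'cat': 0.9, 'dog': 0.4}

The credal set here is the set of all probability vectors squeezed between these bounds; its size is what the paper approximates as the epistemic-uncertainty estimate.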
CreINNs: Credal-Set Interval Neural Networks for Uncertainty Estimation in Classification Tasks
arXiv:2401.05043, 2024. DOI: 10.48550/arXiv.2401.05043
Co-authors: Kaizheng Wang, Keivan Shariatmadar, David Moens, Hans Hallez
Abstract: Uncertainty estimation is increasingly attractive for improving the reliability of neural networks. In this work, we present novel credal-set interval neural networks (CreINNs) designed for classification tasks. CreINNs preserve the traditional interval neural network structure, capturing weight uncertainty through deterministic intervals, while forecasting credal sets using the mathematical framework of probability intervals. Experimental validations on an out-of-distribution detection benchmark (CIFAR10 vs SVHN) show that CreINNs provide better epistemic uncertainty estimation than variational Bayesian neural networks (BNNs) and deep ensembles (DEs). Furthermore, CreINNs exhibit notably lower computational complexity than variational BNNs and smaller model sizes than DEs.
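For intuition about interval weights (again an illustration, not the paper's implementation): the core operation of an interval neural network is propagating bounds through each layer with interval arithmetic. A minimal NumPy sketch of an interval linear layer, with all names and shapes assumed:

    import numpy as np

    def interval_linear(x_low, x_high, W_low, W_high, b_low, b_high):
        # Interval arithmetic for y = W @ x + b with interval weights/inputs:
        # each product w_ij * x_j is bounded by the min/max over the four
        # combinations of the endpoint values.
        cands = np.stack([W_low * x_low, W_low * x_high,
                          W_high * x_low, W_high * x_high])
        y_low = cands.min(axis=0).sum(axis=-1) + b_low
        y_high = cands.max(axis=0).sum(axis=-1) + b_high
        return y_low, y_high

    # A point input with interval weights already yields an interval output.
    x = np.array([0.5, -1.0])
    W_low, W_high = np.array([[0.9, -0.2]]), np.array([[1.1, 0.0]])
    b = np.zeros(1)
    print(interval_linear(x, x, W_low, W_high, b, b))
    # (array([0.45]), array([0.75]))

Stacking such layers (applying monotone activations to both bounds) yields interval logits per class; the paper's probability-interval framework then turns these into a credal set over the classes, a step this sketch does not attempt.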
Reasoning with random sets: An agenda for the future
arXiv:2401.09435, 2023. DOI: 10.48550/arXiv.2401.09435
Abstract: In this paper, we discuss a potential agenda for future work in the theory of random sets and belief functions, touching upon a number of focal issues: the development of a fully-fledged theory of statistical reasoning with random sets, including the generalisation of logistic regression and of the classical laws of probability; the further development of the geometric approach to uncertainty, to include general random sets, a wider range of uncertainty measures and alternative geometric representations; and the application of this new theory to high-impact areas such as climate change, machine learning and statistical learning theory.
Uncertainty","url":"https://www.academia.edu/Documents/in/Decision_Making_Under_Uncertainty"},{"id":22613,"name":"Probability and statistics","url":"https://www.academia.edu/Documents/in/Probability_and_statistics"},{"id":31477,"name":"Uncertainty Quantification","url":"https://www.academia.edu/Documents/in/Uncertainty_Quantification"},{"id":61603,"name":"Uncertainty","url":"https://www.academia.edu/Documents/in/Uncertainty"},{"id":94223,"name":"Imprecise Probability","url":"https://www.academia.edu/Documents/in/Imprecise_Probability"},{"id":94225,"name":"Belief Functions","url":"https://www.academia.edu/Documents/in/Belief_Functions"},{"id":287095,"name":"Knowledge Representation and Reasoning","url":"https://www.academia.edu/Documents/in/Knowledge_Representation_and_Reasoning"},{"id":492766,"name":"Random Sets","url":"https://www.academia.edu/Documents/in/Random_Sets"},{"id":608598,"name":"Uncertainty Modeling","url":"https://www.academia.edu/Documents/in/Uncertainty_Modeling"}],"urls":[]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") if (false) { Aedu.setUpFigureCarousel('profile-work-114557620-figures'); } }); </script> <div class="js-work-strip profile--work_container" data-work-id="114557528"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/114557528/Credal_Learning_Theory"><img alt="Research paper thumbnail of Credal Learning Theory" class="work-thumbnail" src="https://attachments.academia-assets.com/111225102/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/114557528/Credal_Learning_Theory">Credal Learning Theory</a></div><div class="wp-workCard_item wp-workCard--coauthors"><span>by </span><span><a class="" data-click-track="profile-work-strip-authors" href="https://oxfordbrookes.academia.edu/FabioCuzzolin">Fabio Cuzzolin</a>, <a class="" data-click-track="profile-work-strip-authors" href="https://independent.academia.edu/MaryamSultana18">Maryam Sultana</a>, and <a class="" data-click-track="profile-work-strip-authors" href="https://independent.academia.edu/MicheleCaprio">Michele Caprio</a></span></div><div class="wp-workCard_item"><span>arXiv:2402.00957</span><span>, 2024</span></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">Statistical learning theory is the foundation of machine learning, providing theoretical bounds f...</span><a class="js-work-more-abstract" data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">Statistical learning theory is the foundation of machine learning, providing theoretical bounds for the risk of models learnt from a (single) training set, assumed to issue from an unknown probability distribution. In actual deployment, however, the data distribution may (and often does) vary, causing domain adaptation/generalization issues. In this paper we lay the foundations for a 'credal' theory of learning, using convex sets of probabilities (credal sets) to model the variability in the data-generating distribution. 
Such credal sets, we argue, may be inferred from a finite sample of training sets. Bounds are derived for the case of finite hypotheses spaces (both assuming realizability or not) as well as infinite model spaces, which directly generalize classical results.</span></div><div class="wp-workCard_item"><div class="carousel-container carousel-container--sm" id="profile-work-114557528-figures"><div class="prev-slide-container js-prev-button-container"><button aria-label="Previous" class="carousel-navigation-button js-profile-work-114557528-figures-prev"><span class="material-symbols-outlined" style="font-size: 24px" translate="no">arrow_back_ios</span></button></div><div class="slides-container js-slides-container"><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/27673085/figure-1-graphical-representation-of-the-proposed-learning"><img alt="Figure 1: Graphical representation of the proposed learning framework. Given an available finite sample of training sets, each assumed to be generated by a single data distribution, one can learn a credal set P of data distributions in either a frequentist or subjectivist fashion (Section 3). This allows us to derive generalization bounds under credal uncertainty (Section 4). " class="figure-slide-image" src="https://figures.academia-assets.com/111225102/figure_001.jpg" /></a></figure></div><div class="next-slide-container js-next-button-container"><button aria-label="Next" class="carousel-navigation-button js-profile-work-114557528-figures-next"><span class="material-symbols-outlined" style="font-size: 24px" translate="no">arrow_forward_ios</span></button></div></div></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="ead3dbb563585f73fd506b907a5ac1eb" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":111225102,"asset_id":114557528,"asset_type":"Work","button_location":"profile"}" href="https://www.academia.edu/attachments/111225102/download_file?s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="114557528"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="114557528"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 114557528; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=114557528]").text(description); $(".js-view-count[data-work-id=114557528]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 114557528; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='114557528']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); 
container.find('.percentile-widget').removeClass('hidden'); }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-a9bf3a2bc8c89fa2a77156577594264ee8a0f214d74241bc0fcd3f69f8d107ac.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (true){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "ead3dbb563585f73fd506b907a5ac1eb" } } $('.js-work-strip[data-work-id=114557528]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":114557528,"title":"Credal Learning Theory","translated_title":"","metadata":{"doi":"10.48550/arXiv.2402.00957","abstract":"Statistical learning theory is the foundation of machine learning, providing theoretical bounds for the risk of models learnt from a (single) training set, assumed to issue from an unknown probability distribution. In actual deployment, however, the data distribution may (and often does) vary, causing domain adaptation/generalization issues. In this paper we lay the foundations for a 'credal' theory of learning, using convex sets of probabilities (credal sets) to model the variability in the data-generating distribution. Such credal sets, we argue, may be inferred from a finite sample of training sets. Bounds are derived for the case of finite hypotheses spaces (both assuming realizability or not) as well as infinite model spaces, which directly generalize classical results.","ai_title_tag":"Credal Learning Theory for Robust ML Models","publication_date":{"day":null,"month":null,"year":2024,"errors":{}},"publication_name":"arXiv:2402.00957"},"translated_abstract":"Statistical learning theory is the foundation of machine learning, providing theoretical bounds for the risk of models learnt from a (single) training set, assumed to issue from an unknown probability distribution. In actual deployment, however, the data distribution may (and often does) vary, causing domain adaptation/generalization issues. In this paper we lay the foundations for a 'credal' theory of learning, using convex sets of probabilities (credal sets) to model the variability in the data-generating distribution. Such credal sets, we argue, may be inferred from a finite sample of training sets. 
Bounds are derived for the case of finite hypotheses spaces (both assuming realizability or not) as well as infinite model spaces, which directly generalize classical results.","internal_url":"https://www.academia.edu/114557528/Credal_Learning_Theory","translated_internal_url":"","created_at":"2024-02-06T08:27:55.672-08:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":366407,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[{"id":41048551,"work_id":114557528,"tagging_user_id":366407,"tagged_user_id":null,"co_author_invite_id":5334584,"email":"f***n@inrialpes.fr","display_order":1,"name":"Fabio Cuzzolin","title":"Credal Learning Theory"},{"id":41048552,"work_id":114557528,"tagging_user_id":366407,"tagged_user_id":303286795,"co_author_invite_id":8053047,"email":"m***a@brookes.ac.uk","display_order":2,"name":"Maryam Sultana","title":"Credal Learning Theory"},{"id":41048553,"work_id":114557528,"tagging_user_id":366407,"tagged_user_id":null,"co_author_invite_id":8053048,"email":"e***a@brookes.ac.uk","display_order":3,"name":"Eleni Elia","title":"Credal Learning Theory"},{"id":41048554,"work_id":114557528,"tagging_user_id":366407,"tagged_user_id":303284276,"co_author_invite_id":8053049,"email":"c***o@seas.upenn.edu","display_order":4,"name":"Michele Caprio","title":"Credal Learning Theory"}],"downloadable_attachments":[{"id":111225102,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/111225102/thumbnails/1.jpg","file_name":"2402.00957.pdf","download_url":"https://www.academia.edu/attachments/111225102/download_file","bulk_download_file_name":"Credal_Learning_Theory.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/111225102/2402.00957-libre.pdf?1707237627=\u0026response-content-disposition=attachment%3B+filename%3DCredal_Learning_Theory.pdf\u0026Expires=1743344603\u0026Signature=Py8CtrTO-z~0WlK3kREJ-Vi302DBDzzg1Pe7eHCZL8Afxp9g~86cZ5LgWZ0b67Gts~A6jcapcqqRTRWEXzmxRwnnBI3IXVhylfMQ8IfLTrGxDViI6WyUop7T7F5UszmF~c-BWrQuyCYfECRpJl17o7CzXSxAWaAiFNEw1DzC2qJbZQDIkc9UKvmZuvRRoBlxHdhUTJdfJqR24y~hlI3GRNVVs1TDX6BBlGwslEdd~rCXis9vE51t6og1hUejLy24eJGeKCjSmD9WVyQyCUBPbvqMydyDbNw9OeGSydSIHW9EtDygoQh4B21CLg2wlUaWUcWTCLuGokF8LNglnk~PIA__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"slug":"Credal_Learning_Theory","translated_slug":"","page_count":14,"language":"en","content_type":"Work","summary":"Statistical learning theory is the foundation of machine learning, providing theoretical bounds for the risk of models learnt from a (single) training set, assumed to issue from an unknown probability distribution. In actual deployment, however, the data distribution may (and often does) vary, causing domain adaptation/generalization issues. In this paper we lay the foundations for a 'credal' theory of learning, using convex sets of probabilities (credal sets) to model the variability in the data-generating distribution. Such credal sets, we argue, may be inferred from a finite sample of training sets. 
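For context (classical background, not the paper's new result): the finite-hypothesis bounds the abstract says are generalized take, in the agnostic case, the familiar Hoeffding-plus-union-bound form. A minimal sketch, with one standard choice of constants:

    import math

    def finite_hypothesis_bound(emp_risk, n_hypotheses, m, delta):
        # Agnostic case, via Hoeffding's inequality and a union bound over H:
        # with probability >= 1 - delta over an i.i.d. sample of size m,
        #   true_risk(h) <= emp_risk(h) + sqrt(ln(2|H|/delta) / (2m))
        # simultaneously for all h in H.
        return emp_risk + math.sqrt(math.log(2 * n_hypotheses / delta) / (2 * m))

    print(finite_hypothesis_bound(0.05, n_hypotheses=1000, m=10_000, delta=0.05))
    # ~0.073: the excess-risk term shrinks as m grows and as |H| shrinks.

One would expect a credal version to replace the single unknown distribution with a worst case over the inferred credal set of data distributions, which is the direction the abstract and Figure 1 describe; the paper's actual bounds are not reproduced here.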
Bounds are derived for the case of finite hypotheses spaces (both assuming realizability or not) as well as infinite model spaces, which directly generalize classical results.","owner":{"id":366407,"first_name":"Fabio","middle_initials":null,"last_name":"Cuzzolin","page_name":"FabioCuzzolin","domain_name":"oxfordbrookes","created_at":"2011-03-17T00:43:31.195-07:00","display_name":"Fabio Cuzzolin","url":"https://oxfordbrookes.academia.edu/FabioCuzzolin"},"attachments":[{"id":111225102,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/111225102/thumbnails/1.jpg","file_name":"2402.00957.pdf","download_url":"https://www.academia.edu/attachments/111225102/download_file","bulk_download_file_name":"Credal_Learning_Theory.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/111225102/2402.00957-libre.pdf?1707237627=\u0026response-content-disposition=attachment%3B+filename%3DCredal_Learning_Theory.pdf\u0026Expires=1743344603\u0026Signature=Py8CtrTO-z~0WlK3kREJ-Vi302DBDzzg1Pe7eHCZL8Afxp9g~86cZ5LgWZ0b67Gts~A6jcapcqqRTRWEXzmxRwnnBI3IXVhylfMQ8IfLTrGxDViI6WyUop7T7F5UszmF~c-BWrQuyCYfECRpJl17o7CzXSxAWaAiFNEw1DzC2qJbZQDIkc9UKvmZuvRRoBlxHdhUTJdfJqR24y~hlI3GRNVVs1TDX6BBlGwslEdd~rCXis9vE51t6og1hUejLy24eJGeKCjSmD9WVyQyCUBPbvqMydyDbNw9OeGSydSIHW9EtDygoQh4B21CLg2wlUaWUcWTCLuGokF8LNglnk~PIA__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"research_interests":[{"id":892,"name":"Statistics","url":"https://www.academia.edu/Documents/in/Statistics"},{"id":2008,"name":"Machine Learning","url":"https://www.academia.edu/Documents/in/Machine_Learning"},{"id":6404,"name":"Reasoning about Uncertainty","url":"https://www.academia.edu/Documents/in/Reasoning_about_Uncertainty"},{"id":15084,"name":"Statistical machine learning","url":"https://www.academia.edu/Documents/in/Statistical_machine_learning"},{"id":16097,"name":"Decision Making Under Uncertainty","url":"https://www.academia.edu/Documents/in/Decision_Making_Under_Uncertainty"},{"id":37316,"name":"Statistical Learning Theory","url":"https://www.academia.edu/Documents/in/Statistical_Learning_Theory"},{"id":38246,"name":"Statistical Learning","url":"https://www.academia.edu/Documents/in/Statistical_Learning"},{"id":61603,"name":"Uncertainty","url":"https://www.academia.edu/Documents/in/Uncertainty"},{"id":388873,"name":"Mathematics and Statistics","url":"https://www.academia.edu/Documents/in/Mathematics_and_Statistics"}],"urls":[]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") if (true) { Aedu.setUpFigureCarousel('profile-work-114557528-figures'); } }); </script> <div class="js-work-strip profile--work_container" data-work-id="114557478"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" rel="nofollow" href="https://www.academia.edu/114557478/Semantics_Driven_Generative_Replay_for_Few_Shot_Class_Incremental_Learning"><img alt="Research paper thumbnail of Semantics-Driven Generative Replay for Few-Shot Class Incremental Learning" class="work-thumbnail" src="https://a.academia-assets.com/images/blank-paper.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title">Semantics-Driven Generative Replay for Few-Shot Class Incremental Learning</div><div class="wp-workCard_item"><span>MM '22: Proceedings of the 30th ACM International Conference on Multimedia</span><span>, 2022</span></div><div 
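For context, here is a minimal sketch (not from the paper) of the classical finite-hypothesis, realizable-case bound that the credal theory generalizes: with probability at least 1 - delta, any hypothesis in a finite class H consistent with m i.i.d. samples has true risk at most (ln|H| + ln(1/delta))/m. The function name and the illustrative numbers are ours.

import math

def finite_class_risk_bound(num_hypotheses: int, m: int, delta: float) -> float:
    """Classical realizable-case bound: with prob. >= 1 - delta, a hypothesis
    in a finite class H consistent with m i.i.d. training samples has true
    risk at most (ln|H| + ln(1/delta)) / m."""
    return (math.log(num_hypotheses) + math.log(1.0 / delta)) / m

# Credal learning theory replaces the single unknown distribution with a
# credal set; conceptually, the bound must then hold for the worst case
# over all distributions in the set.
print(finite_class_risk_bound(num_hypotheses=1000, m=5000, delta=0.05))  # ~0.0020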
class="wp-workCard_item"><span class="js-work-more-abstract-truncated">We deal with the problem of few-shot class incremental learning (FSCIL), which requires a model t...</span><a class="js-work-more-abstract" data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">We deal with the problem of few-shot class incremental learning (FSCIL), which requires a model to continuously recognize new categories for which limited training data are available. Existing FSCIL methods depend on prior knowledge to regularize the model parameters for combating catastrophic forgetting. Devising an effective prior in a low-data regime, however, is not trivial. The memory-replay based approaches from the fully-supervised class incremental learning (CIL) literature cannot be used directly for FSCIL as the generative memory-replay modules of CIL are hard to train from few training samples. However, generative replay can tackle both the stability and plasticity of the models simultaneously by generating a large number of class-conditional samples. Convinced by this fact, we propose a generative modeling-based FSCIL framework using the paradigm of memory-replay in which a novel conditional few-shot generative adversarial network (GAN) is incrementally trained to produce visual features while ensuring the stability-plasticity trade-off through novel loss functions and combating the mode-collapse problem effectively. Furthermore, the class-specific synthesized visual features from the few-shot GAN are constrained to match the respective latent semantic prototypes obtained from a well-defined semantic space. We find that the advantages of this semantic restriction is two-fold, in dealing with forgetting, while making the features class-discernible. The model requires a single per-class prototype vector to be maintained in a dynamic memory buffer. 
Experimental results on the benchmark and large-scale CiFAR-100, CUB-200, and Mini-ImageNet confirm the superiority of our model over the current FSCIL state of the art.</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="114557478"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="114557478"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 114557478; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=114557478]").text(description); $(".js-view-count[data-work-id=114557478]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 114557478; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='114557478']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-a9bf3a2bc8c89fa2a77156577594264ee8a0f214d74241bc0fcd3f69f8d107ac.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (false){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "-1" } } $('.js-work-strip[data-work-id=114557478]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":114557478,"title":"Semantics-Driven Generative Replay for Few-Shot Class Incremental Learning","translated_title":"","metadata":{"doi":"10.1145/3503161.3548160","abstract":"We deal with the problem of few-shot class incremental learning (FSCIL), which requires a model to continuously recognize new categories for which limited training data are available. Existing FSCIL methods depend on prior knowledge to regularize the model parameters for combating catastrophic forgetting. Devising an effective prior in a low-data regime, however, is not trivial. The memory-replay based approaches from the fully-supervised class incremental learning (CIL) literature cannot be used directly for FSCIL as the generative memory-replay modules of CIL are hard to train from few training samples. However, generative replay can tackle both the stability and plasticity of the models simultaneously by generating a large number of class-conditional samples. 
Convinced by this fact, we propose a generative modeling-based FSCIL framework using the paradigm of memory-replay in which a novel conditional few-shot generative adversarial network (GAN) is incrementally trained to produce visual features while ensuring the stability-plasticity trade-off through novel loss functions and combating the mode-collapse problem effectively. Furthermore, the class-specific synthesized visual features from the few-shot GAN are constrained to match the respective latent semantic prototypes obtained from a well-defined semantic space. We find that the advantages of this semantic restriction is two-fold, in dealing with forgetting, while making the features class-discernible. The model requires a single per-class prototype vector to be maintained in a dynamic memory buffer. Experimental results on the benchmark and large-scale CiFAR-100, CUB-200, and Mini-ImageNet confirm the superiority of our model over the current FSCIL state of the art.","publication_date":{"day":null,"month":null,"year":2022,"errors":{}},"publication_name":"MM '22: Proceedings of the 30th ACM International Conference on Multimedia"},"translated_abstract":"We deal with the problem of few-shot class incremental learning (FSCIL), which requires a model to continuously recognize new categories for which limited training data are available. Existing FSCIL methods depend on prior knowledge to regularize the model parameters for combating catastrophic forgetting. Devising an effective prior in a low-data regime, however, is not trivial. The memory-replay based approaches from the fully-supervised class incremental learning (CIL) literature cannot be used directly for FSCIL as the generative memory-replay modules of CIL are hard to train from few training samples. However, generative replay can tackle both the stability and plasticity of the models simultaneously by generating a large number of class-conditional samples. Convinced by this fact, we propose a generative modeling-based FSCIL framework using the paradigm of memory-replay in which a novel conditional few-shot generative adversarial network (GAN) is incrementally trained to produce visual features while ensuring the stability-plasticity trade-off through novel loss functions and combating the mode-collapse problem effectively. Furthermore, the class-specific synthesized visual features from the few-shot GAN are constrained to match the respective latent semantic prototypes obtained from a well-defined semantic space. We find that the advantages of this semantic restriction is two-fold, in dealing with forgetting, while making the features class-discernible. The model requires a single per-class prototype vector to be maintained in a dynamic memory buffer. 
Experimental results on the benchmark and large-scale CiFAR-100, CUB-200, and Mini-ImageNet confirm the superiority of our model over the current FSCIL state of the art.","internal_url":"https://www.academia.edu/114557478/Semantics_Driven_Generative_Replay_for_Few_Shot_Class_Incremental_Learning","translated_internal_url":"","created_at":"2024-02-06T08:26:29.265-08:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":366407,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[],"downloadable_attachments":[],"slug":"Semantics_Driven_Generative_Replay_for_Few_Shot_Class_Incremental_Learning","translated_slug":"","page_count":null,"language":"en","content_type":"Work","summary":"We deal with the problem of few-shot class incremental learning (FSCIL), which requires a model to continuously recognize new categories for which limited training data are available. Existing FSCIL methods depend on prior knowledge to regularize the model parameters for combating catastrophic forgetting. Devising an effective prior in a low-data regime, however, is not trivial. The memory-replay based approaches from the fully-supervised class incremental learning (CIL) literature cannot be used directly for FSCIL as the generative memory-replay modules of CIL are hard to train from few training samples. However, generative replay can tackle both the stability and plasticity of the models simultaneously by generating a large number of class-conditional samples. Convinced by this fact, we propose a generative modeling-based FSCIL framework using the paradigm of memory-replay in which a novel conditional few-shot generative adversarial network (GAN) is incrementally trained to produce visual features while ensuring the stability-plasticity trade-off through novel loss functions and combating the mode-collapse problem effectively. Furthermore, the class-specific synthesized visual features from the few-shot GAN are constrained to match the respective latent semantic prototypes obtained from a well-defined semantic space. We find that the advantages of this semantic restriction is two-fold, in dealing with forgetting, while making the features class-discernible. The model requires a single per-class prototype vector to be maintained in a dynamic memory buffer. 
Experimental results on the benchmark and large-scale CiFAR-100, CUB-200, and Mini-ImageNet confirm the superiority of our model over the current FSCIL state of the art.","owner":{"id":366407,"first_name":"Fabio","middle_initials":null,"last_name":"Cuzzolin","page_name":"FabioCuzzolin","domain_name":"oxfordbrookes","created_at":"2011-03-17T00:43:31.195-07:00","display_name":"Fabio Cuzzolin","url":"https://oxfordbrookes.academia.edu/FabioCuzzolin"},"attachments":[],"research_interests":[{"id":422,"name":"Computer Science","url":"https://www.academia.edu/Documents/in/Computer_Science"},{"id":465,"name":"Artificial Intelligence","url":"https://www.academia.edu/Documents/in/Artificial_Intelligence"},{"id":2008,"name":"Machine Learning","url":"https://www.academia.edu/Documents/in/Machine_Learning"},{"id":81182,"name":"Deep Learning","url":"https://www.academia.edu/Documents/in/Deep_Learning"}],"urls":[{"id":39239752,"url":"https://dl.acm.org/doi/abs/10.1145/3503161.3548160"}]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") if (false) { Aedu.setUpFigureCarousel('profile-work-114557478-figures'); } }); </script> <div class="js-work-strip profile--work_container" data-work-id="114557325"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/114557325/Temporal_DINO_A_Self_supervised_Video_Strategy_to_Enhance_Action_Prediction"><img alt="Research paper thumbnail of Temporal DINO: A Self-supervised Video Strategy to Enhance Action Prediction" class="work-thumbnail" src="https://attachments.academia-assets.com/111224952/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/114557325/Temporal_DINO_A_Self_supervised_Video_Strategy_to_Enhance_Action_Prediction">Temporal DINO: A Self-supervised Video Strategy to Enhance Action Prediction</a></div><div class="wp-workCard_item wp-workCard--coauthors"><span>by </span><span><a class="" data-click-track="profile-work-strip-authors" href="https://oxfordbrookes.academia.edu/FabioCuzzolin">Fabio Cuzzolin</a> and <a class="" data-click-track="profile-work-strip-authors" href="https://independent.academia.edu/AndrewBradley42">Andrew Bradley</a></span></div><div class="wp-workCard_item"><span>2023 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW)</span><span>, 2023</span></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">The emerging field of action prediction-the task of forecasting action in a video sequence-plays ...</span><a class="js-work-more-abstract" data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">The emerging field of action prediction-the task of forecasting action in a video sequence-plays a vital role in various computer vision applications such as autonomous driving, activity analysis and human-computer interaction. Despite significant advancements, accurately predicting future actions remains a challenging problem due to high dimensionality, complex dynamics and uncertainties inherent in video data. 
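A minimal PyTorch sketch of the generative memory-replay idea described above: a conditional generator synthesizes features of previously seen classes, which can be rehearsed alongside the few real samples of the new classes. All module and function names here are hypothetical illustrations, not the paper's actual architecture.

import torch
import torch.nn as nn

class CondFeatureGenerator(nn.Module):
    """Class-conditional generator producing visual features (not images)."""
    def __init__(self, n_classes: int, z_dim: int = 64, feat_dim: int = 512):
        super().__init__()
        self.label_emb = nn.Embedding(n_classes, z_dim)
        self.net = nn.Sequential(
            nn.Linear(2 * z_dim, 256), nn.ReLU(),
            nn.Linear(256, feat_dim),
        )

    def forward(self, z: torch.Tensor, y: torch.Tensor) -> torch.Tensor:
        return self.net(torch.cat([z, self.label_emb(y)], dim=1))

def replay_batch(gen: CondFeatureGenerator, old_classes: torch.Tensor,
                 batch: int = 32, z_dim: int = 64):
    """Generate pseudo-features of old classes to rehearse next to the
    few real new-class samples (stability vs. plasticity)."""
    y = old_classes[torch.randint(len(old_classes), (batch,))]
    z = torch.randn(batch, z_dim)
    return gen(z, y), y

gen = CondFeatureGenerator(n_classes=100)
feats, labels = replay_batch(gen, old_classes=torch.arange(60))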
Temporal DINO: A Self-supervised Video Strategy to Enhance Action Prediction
With Izzeddin Teeti, Vivek Singh, Andrew Bradley and Biplab Banerjee. 2023 IEEE/CVF International Conference on Computer Vision Workshops (ICCVW), 2023. DOI: 10.1109/ICCVW60793.2023.00352.

Abstract: The emerging field of action prediction, the task of forecasting action in a video sequence, plays a vital role in various computer vision applications such as autonomous driving, activity analysis and human-computer interaction. Despite significant advancements, accurately predicting future actions remains a challenging problem due to the high dimensionality, complex dynamics and uncertainties inherent in video data. Traditional supervised approaches require large amounts of labelled data, which is expensive and time-consuming to obtain. This paper introduces a novel self-supervised video strategy for enhancing action prediction, inspired by DINO (self-distillation with no labels). The approach, named Temporal-DINO, employs two models: a 'student' processing past frames, and a 'teacher' processing both past and future frames, enabling a broader temporal context. During training, the teacher guides the student to learn future context while only observing past frames. The strategy is evaluated on the ROAD dataset for the action prediction downstream task using 3D-ResNet, Transformer, and LSTM architectures. The experimental results show significant improvements in prediction performance across these architectures, with our method achieving an average gain of 9.9 Precision Points (PP), highlighting its effectiveness in enhancing the backbones' ability to capture long-term dependencies. Furthermore, our approach is efficient in terms of the pretraining dataset size and the number of epochs required. The method also overcomes limitations of other approaches by considering various backbone architectures, addressing multiple prediction horizons, reducing reliance on hand-crafted augmentations, and streamlining the pretraining process into a single stage. These findings highlight the potential of our approach in diverse video-based tasks such as activity recognition, motion planning, and scene understanding. Code can be found at https://github.com/IzzeddinTeeti/ssl_pred.

[Figure 1: Overview of the proposed Temporal DINO. The student model processes the past frames, while the teacher processes both the past and future frames; a future-past distillation loss is applied to their representations to guide the student to capture the future temporal context from the teacher.]
[Table 1: Precision of different backbones with varying input lengths under the three evaluation protocols. Table 2: Evaluation of three common loss functions on R3D and Swin backbones. Table 4: Comparison with state-of-the-art methods on human action recognition (UCF101); models that treat spatial and temporal relationships separately (ViT+LSTM, ResNet+LSTM) outperform those that model them jointly (R3D, Swin) by margins of 16.6 and 5.7 percentage points, respectively.]

Topics: Computer Science, Artificial Intelligence, Computer Vision, Machine Learning, Video Processing, Artificial Neural Networks, Deep Learning, Self-Supervised Learning.
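A minimal sketch of the DINO-style future-past distillation described in the abstract: the teacher (which saw past and future frames) produces a sharpened target distribution, the student (past frames only) is trained to match it, and the teacher is updated as an exponential moving average of the student. The temperatures and momentum value are illustrative defaults, not the paper's settings.

import torch
import torch.nn.functional as F

def future_past_distill_loss(student_out, teacher_out, tau_s=0.1, tau_t=0.04):
    # Cross-entropy between the sharpened teacher distribution (computed
    # with past + future context) and the student distribution (past only).
    t = F.softmax(teacher_out / tau_t, dim=-1).detach()
    s = F.log_softmax(student_out / tau_s, dim=-1)
    return -(t * s).sum(dim=-1).mean()

@torch.no_grad()
def ema_update(teacher, student, momentum=0.996):
    # The teacher receives no gradients; it tracks the student's weights.
    for pt, ps in zip(teacher.parameters(), student.parameters()):
        pt.mul_(momentum).add_(ps, alpha=1.0 - momentum)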
Processing","url":"https://www.academia.edu/Documents/in/Video_Processing"},{"id":54123,"name":"Artificial Neural Networks","url":"https://www.academia.edu/Documents/in/Artificial_Neural_Networks"},{"id":81182,"name":"Deep Learning","url":"https://www.academia.edu/Documents/in/Deep_Learning"},{"id":1286170,"name":"Self-Supervised Learning","url":"https://www.academia.edu/Documents/in/Self-Supervised_Learning"}],"urls":[]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") if (true) { Aedu.setUpFigureCarousel('profile-work-114557325-figures'); } }); </script> <div class="js-work-strip profile--work_container" data-work-id="114557090"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/114557090/A_Hybrid_Graph_Network_for_Complex_Activity_Detection_in_Video"><img alt="Research paper thumbnail of A Hybrid Graph Network for Complex Activity Detection in Video" class="work-thumbnail" src="https://attachments.academia-assets.com/111224756/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/114557090/A_Hybrid_Graph_Network_for_Complex_Activity_Detection_in_Video">A Hybrid Graph Network for Complex Activity Detection in Video</a></div><div class="wp-workCard_item"><span>IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)</span><span>, 2024</span></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">Interpretation and understanding of video presents a challenging computer vision task in numerous...</span><a class="js-work-more-abstract" data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">Interpretation and understanding of video presents a challenging computer vision task in numerous fields - e.g. autonomous driving and sports analytics. Existing approaches to interpreting the actions taking place within a video clip are based upon Temporal Action Localisation (TAL), which typically identifies short-term actions. The emerging field of Complex Activity Detection (CompAD) extends this analysis to long-term activities, with a deeper understanding obtained by modelling the internal structure of a complex activity taking place within the video. We address the CompAD problem using a hybrid graph neural network which combines attention applied to a graph encoding the local (short-term) dynamic scene with a temporal graph modelling the overall long-duration activity. Our approach is as follows: i) Firstly, we propose a novel feature extraction technique which, for each video snippet, generates spatiotemporal `tubes' for the active elements (`agents') in the (local) scene by detecting individual objects, tracking them and then extracting 3D features from all the agent tubes as well as the overall scene. ii) Next, we construct a local scene graph where each node (representing either an agent tube or the scene) is connected to all other nodes. Attention is then applied to this graph to obtain an overall representation of the local dynamic scene. 
iii) Finally, all local scene graph representations are interconnected via a temporal graph, to estimate the complex activity class together with its start and end time. The proposed framework outperforms all previous state-of-the-art methods on all three datasets including ActivityNet-1.3, Thumos-14, and ROAD.</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="e54a1e3af37a68243ac3dea799223f12" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":111224756,"asset_id":114557090,"asset_type":"Work","button_location":"profile"}" href="https://www.academia.edu/attachments/111224756/download_file?s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="114557090"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="114557090"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 114557090; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=114557090]").text(description); $(".js-view-count[data-work-id=114557090]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 114557090; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='114557090']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-a9bf3a2bc8c89fa2a77156577594264ee8a0f214d74241bc0fcd3f69f8d107ac.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (true){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "e54a1e3af37a68243ac3dea799223f12" } } $('.js-work-strip[data-work-id=114557090]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":114557090,"title":"A Hybrid Graph Network for Complex Activity Detection in Video","translated_title":"","metadata":{"doi":"10.48550/arXiv.2310.17493","abstract":"Interpretation and understanding of video presents a challenging computer vision task in numerous fields - e.g. autonomous driving and sports analytics. Existing approaches to interpreting the actions taking place within a video clip are based upon Temporal Action Localisation (TAL), which typically identifies short-term actions. 
The emerging field of Complex Activity Detection (CompAD) extends this analysis to long-term activities, with a deeper understanding obtained by modelling the internal structure of a complex activity taking place within the video. We address the CompAD problem using a hybrid graph neural network which combines attention applied to a graph encoding the local (short-term) dynamic scene with a temporal graph modelling the overall long-duration activity. Our approach is as follows: i) Firstly, we propose a novel feature extraction technique which, for each video snippet, generates spatiotemporal `tubes' for the active elements (`agents') in the (local) scene by detecting individual objects, tracking them and then extracting 3D features from all the agent tubes as well as the overall scene. ii) Next, we construct a local scene graph where each node (representing either an agent tube or the scene) is connected to all other nodes. Attention is then applied to this graph to obtain an overall representation of the local dynamic scene. iii) Finally, all local scene graph representations are interconnected via a temporal graph, to estimate the complex activity class together with its start and end time. The proposed framework outperforms all previous state-of-the-art methods on all three datasets including ActivityNet-1.3, Thumos-14, and ROAD.","ai_title_tag":"Hybrid Graph Network for Complex Activity Detection in Videos","publication_date":{"day":null,"month":null,"year":2024,"errors":{}},"publication_name":"IEEE/CVF Winter Conference on Applications of Computer Vision (WACV)"},"translated_abstract":"Interpretation and understanding of video presents a challenging computer vision task in numerous fields - e.g. autonomous driving and sports analytics. Existing approaches to interpreting the actions taking place within a video clip are based upon Temporal Action Localisation (TAL), which typically identifies short-term actions. The emerging field of Complex Activity Detection (CompAD) extends this analysis to long-term activities, with a deeper understanding obtained by modelling the internal structure of a complex activity taking place within the video. We address the CompAD problem using a hybrid graph neural network which combines attention applied to a graph encoding the local (short-term) dynamic scene with a temporal graph modelling the overall long-duration activity. Our approach is as follows: i) Firstly, we propose a novel feature extraction technique which, for each video snippet, generates spatiotemporal `tubes' for the active elements (`agents') in the (local) scene by detecting individual objects, tracking them and then extracting 3D features from all the agent tubes as well as the overall scene. ii) Next, we construct a local scene graph where each node (representing either an agent tube or the scene) is connected to all other nodes. Attention is then applied to this graph to obtain an overall representation of the local dynamic scene. iii) Finally, all local scene graph representations are interconnected via a temporal graph, to estimate the complex activity class together with its start and end time. 
The proposed framework outperforms all previous state-of-the-art methods on all three datasets including ActivityNet-1.3, Thumos-14, and ROAD.","internal_url":"https://www.academia.edu/114557090/A_Hybrid_Graph_Network_for_Complex_Activity_Detection_in_Video","translated_internal_url":"","created_at":"2024-02-06T08:14:47.233-08:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":366407,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[{"id":41048513,"work_id":114557090,"tagging_user_id":366407,"tagged_user_id":null,"co_author_invite_id":7604258,"email":"s***n@brookes.ac.uk","display_order":1,"name":"Salman Khan","title":"A Hybrid Graph Network for Complex Activity Detection in Video"},{"id":41048514,"work_id":114557090,"tagging_user_id":366407,"tagged_user_id":null,"co_author_invite_id":3068318,"email":"p***5@brookes.ac.uk","display_order":2,"name":"Andrew Bradley","title":"A Hybrid Graph Network for Complex Activity Detection in Video"},{"id":41048515,"work_id":114557090,"tagging_user_id":366407,"tagged_user_id":null,"co_author_invite_id":7604221,"email":"i***i@brookes.ac.uk","display_order":4,"name":"Izzeddin Teeti","title":"A Hybrid Graph Network for Complex Activity Detection in Video"}],"downloadable_attachments":[{"id":111224756,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/111224756/thumbnails/1.jpg","file_name":"Khan_A_Hybrid_Graph_Network_for_Complex_Activity_Detection_in_Video_WACV_2024_paper.pdf","download_url":"https://www.academia.edu/attachments/111224756/download_file","bulk_download_file_name":"A_Hybrid_Graph_Network_for_Complex_Activ.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/111224756/Khan_A_Hybrid_Graph_Network_for_Complex_Activity_Detection_in_Video_WACV_2024_paper-libre.pdf?1707237690=\u0026response-content-disposition=attachment%3B+filename%3DA_Hybrid_Graph_Network_for_Complex_Activ.pdf\u0026Expires=1743273935\u0026Signature=Kao~rbZ-z80MpbKB4Eg-ac3Fxrh7CMh1MGKc~wSFlhz-JMqbjmXnZImDB3OalJahb07YX-gBdh9z73iNHxCfSUy7xEbK~on1QYs27Is~JLRjdHI1RCAZKwPwzEf1KxZ3k7l5eXdxQNQMLSXCQeFtzFkLasTAgxtfI4SnhZhyjEeXq23TmT4TLUetRlA0Dv42oc2WuC4c-Zb5aI5DDCsQcgpSn7OMOYrC8yRLwz-UTeA6E1X1oIhqxT~kewgC1Vruz6R1IuBn34auCqGLuW3w-q9UeU1AW5deGMW6oC~a2dyCwgvN814yT0P3FI85UeJcmzwpVc2LFsEoRgCj2FmKEg__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"slug":"A_Hybrid_Graph_Network_for_Complex_Activity_Detection_in_Video","translated_slug":"","page_count":11,"language":"en","content_type":"Work","summary":"Interpretation and understanding of video presents a challenging computer vision task in numerous fields - e.g. autonomous driving and sports analytics. Existing approaches to interpreting the actions taking place within a video clip are based upon Temporal Action Localisation (TAL), which typically identifies short-term actions. The emerging field of Complex Activity Detection (CompAD) extends this analysis to long-term activities, with a deeper understanding obtained by modelling the internal structure of a complex activity taking place within the video. We address the CompAD problem using a hybrid graph neural network which combines attention applied to a graph encoding the local (short-term) dynamic scene with a temporal graph modelling the overall long-duration activity. 
Our approach is as follows: i) Firstly, we propose a novel feature extraction technique which, for each video snippet, generates spatiotemporal `tubes' for the active elements (`agents') in the (local) scene by detecting individual objects, tracking them and then extracting 3D features from all the agent tubes as well as the overall scene. ii) Next, we construct a local scene graph where each node (representing either an agent tube or the scene) is connected to all other nodes. Attention is then applied to this graph to obtain an overall representation of the local dynamic scene. iii) Finally, all local scene graph representations are interconnected via a temporal graph, to estimate the complex activity class together with its start and end time. The proposed framework outperforms all previous state-of-the-art methods on all three datasets including ActivityNet-1.3, Thumos-14, and ROAD.","owner":{"id":366407,"first_name":"Fabio","middle_initials":null,"last_name":"Cuzzolin","page_name":"FabioCuzzolin","domain_name":"oxfordbrookes","created_at":"2011-03-17T00:43:31.195-07:00","display_name":"Fabio Cuzzolin","url":"https://oxfordbrookes.academia.edu/FabioCuzzolin","email":"b1oxRmVaTHVVeWVrcWNtL3c1U0k3dlUzQVI3ck42TzRFdzcrQ09GU3B5UkZzKzM4b2NFUnUxeGU4eERIQTRGRi0tRHBzbWF6dlN3RzBkWXM0WXNRV3pKUT09--e780c745791a66b20ed6c8125a5b759fdf94fe01"},"attachments":[{"id":111224756,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/111224756/thumbnails/1.jpg","file_name":"Khan_A_Hybrid_Graph_Network_for_Complex_Activity_Detection_in_Video_WACV_2024_paper.pdf","download_url":"https://www.academia.edu/attachments/111224756/download_file","bulk_download_file_name":"A_Hybrid_Graph_Network_for_Complex_Activ.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/111224756/Khan_A_Hybrid_Graph_Network_for_Complex_Activity_Detection_in_Video_WACV_2024_paper-libre.pdf?1707237690=\u0026response-content-disposition=attachment%3B+filename%3DA_Hybrid_Graph_Network_for_Complex_Activ.pdf\u0026Expires=1743273935\u0026Signature=Kao~rbZ-z80MpbKB4Eg-ac3Fxrh7CMh1MGKc~wSFlhz-JMqbjmXnZImDB3OalJahb07YX-gBdh9z73iNHxCfSUy7xEbK~on1QYs27Is~JLRjdHI1RCAZKwPwzEf1KxZ3k7l5eXdxQNQMLSXCQeFtzFkLasTAgxtfI4SnhZhyjEeXq23TmT4TLUetRlA0Dv42oc2WuC4c-Zb5aI5DDCsQcgpSn7OMOYrC8yRLwz-UTeA6E1X1oIhqxT~kewgC1Vruz6R1IuBn34auCqGLuW3w-q9UeU1AW5deGMW6oC~a2dyCwgvN814yT0P3FI85UeJcmzwpVc2LFsEoRgCj2FmKEg__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"research_interests":[{"id":422,"name":"Computer Science","url":"https://www.academia.edu/Documents/in/Computer_Science"},{"id":854,"name":"Computer Vision","url":"https://www.academia.edu/Documents/in/Computer_Vision"},{"id":1185,"name":"Image Processing","url":"https://www.academia.edu/Documents/in/Image_Processing"},{"id":2008,"name":"Machine Learning","url":"https://www.academia.edu/Documents/in/Machine_Learning"},{"id":15665,"name":"Video Processing","url":"https://www.academia.edu/Documents/in/Video_Processing"},{"id":39699,"name":"Probabilistic Graphical Models","url":"https://www.academia.edu/Documents/in/Probabilistic_Graphical_Models"},{"id":81182,"name":"Deep Learning","url":"https://www.academia.edu/Documents/in/Deep_Learning"},{"id":90270,"name":"Action Recognition","url":"https://www.academia.edu/Documents/in/Action_Recognition"},{"id":315678,"name":"Human Action Recognition","url":"https://www.academia.edu/Documents/in/Human_Action_Recognition"},{"id":1433808,"name":"Convolutional Neural 
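A minimal sketch of step ii) above: the scene feature and the agent-tube features form a fully connected local scene graph, modelled here with self-attention; reading out the scene node gives the snippet-level representation that the temporal graph then links over time. The class name and dimensions are illustrative, not the paper's implementation.

import torch
import torch.nn as nn

class LocalSceneGraph(nn.Module):
    """Fully connected graph over [scene node; agent-tube nodes],
    realised here as multi-head self-attention."""
    def __init__(self, d: int = 256, heads: int = 4):
        super().__init__()
        self.attn = nn.MultiheadAttention(d, heads, batch_first=True)

    def forward(self, nodes: torch.Tensor) -> torch.Tensor:
        # nodes: (batch, 1 + n_agents, d); node 0 is the whole-scene feature.
        out, _ = self.attn(nodes, nodes, nodes)
        return out[:, 0]  # snippet-level summary of the local dynamic scene

# One representation per snippet; a temporal graph over these would then
# estimate the complex activity class and its start/end time.
graph = LocalSceneGraph()
snippet_reps = [graph(torch.randn(1, 6, 256)) for _ in range(8)]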
Networks","url":"https://www.academia.edu/Documents/in/Convolutional_Neural_Networks"}],"urls":[]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") if (false) { Aedu.setUpFigureCarousel('profile-work-114557090-figures'); } }); </script> <div class="js-work-strip profile--work_container" data-work-id="114556507"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" rel="nofollow" href="https://www.academia.edu/114556507/DeepSmoke_Deep_learning_model_for_smoke_detection_and_segmentation_in_outdoor_environments"><img alt="Research paper thumbnail of DeepSmoke: Deep learning model for smoke detection and segmentation in outdoor environments" class="work-thumbnail" src="https://a.academia-assets.com/images/blank-paper.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title">DeepSmoke: Deep learning model for smoke detection and segmentation in outdoor environments</div><div class="wp-workCard_item"><span>Expert Systems with Applications</span><span>, 2021</span></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">Fire disaster throughout the globe causes social, environmental, and economical damage, making it...</span><a class="js-work-more-abstract" data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">Fire disaster throughout the globe causes social, environmental, and economical damage, making its early detection and instant reporting essential for saving human lives and properties. Smoke detection plays a key role in early fire detection but majority of the existing methods are limited to either indoor or outdoor surveillance environments, with poor performance for hazy scenarios. In this paper, we present a Convolutional Neural Network (CNN)-based smoke detection and segmentation framework for both clear and hazy environments. Unlike existing methods, we employ an efficient CNN architecture, termed EfficientNet, for smoke detection with better accuracy. We also segment the smoke regions using DeepLabv3+, which is supported by effective encoders and decoders along with a pixel-wise classifier for optimum localization. Our smoke detection results evince a noticeable gain up to 3% in accuracy and a decrease of 0.46% in False Alarm Rate (FAR), while segmentation reports a significant increase of 2% and 1% in global accuracy and mean Intersection over Union (IoU) scores, respectively. 
This makes our method a best fit for smoke detection and segmentation in real-world surveillance settings.</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="114556507"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="114556507"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 114556507; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=114556507]").text(description); $(".js-view-count[data-work-id=114556507]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 114556507; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='114556507']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-a9bf3a2bc8c89fa2a77156577594264ee8a0f214d74241bc0fcd3f69f8d107ac.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (false){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "-1" } } $('.js-work-strip[data-work-id=114556507]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":114556507,"title":"DeepSmoke: Deep learning model for smoke detection and segmentation in outdoor environments","translated_title":"","metadata":{"doi":"10.1016/j.eswa.2021.115125","abstract":"Fire disaster throughout the globe causes social, environmental, and economical damage, making its early detection and instant reporting essential for saving human lives and properties. Smoke detection plays a key role in early fire detection but majority of the existing methods are limited to either indoor or outdoor surveillance environments, with poor performance for hazy scenarios. In this paper, we present a Convolutional Neural Network (CNN)-based smoke detection and segmentation framework for both clear and hazy environments. Unlike existing methods, we employ an efficient CNN architecture, termed EfficientNet, for smoke detection with better accuracy. We also segment the smoke regions using DeepLabv3+, which is supported by effective encoders and decoders along with a pixel-wise classifier for optimum localization. 
Our smoke detection results evince a noticeable gain up to 3% in accuracy and a decrease of 0.46% in False Alarm Rate (FAR), while segmentation reports a significant increase of 2% and 1% in global accuracy and mean Intersection over Union (IoU) scores, respectively. This makes our method a best fit for smoke detection and segmentation in real-world surveillance settings.","publication_date":{"day":null,"month":null,"year":2021,"errors":{}},"publication_name":"Expert Systems with Applications"},"translated_abstract":"Fire disaster throughout the globe causes social, environmental, and economical damage, making its early detection and instant reporting essential for saving human lives and properties. Smoke detection plays a key role in early fire detection but majority of the existing methods are limited to either indoor or outdoor surveillance environments, with poor performance for hazy scenarios. In this paper, we present a Convolutional Neural Network (CNN)-based smoke detection and segmentation framework for both clear and hazy environments. Unlike existing methods, we employ an efficient CNN architecture, termed EfficientNet, for smoke detection with better accuracy. We also segment the smoke regions using DeepLabv3+, which is supported by effective encoders and decoders along with a pixel-wise classifier for optimum localization. Our smoke detection results evince a noticeable gain up to 3% in accuracy and a decrease of 0.46% in False Alarm Rate (FAR), while segmentation reports a significant increase of 2% and 1% in global accuracy and mean Intersection over Union (IoU) scores, respectively. This makes our method a best fit for smoke detection and segmentation in real-world surveillance settings.","internal_url":"https://www.academia.edu/114556507/DeepSmoke_Deep_learning_model_for_smoke_detection_and_segmentation_in_outdoor_environments","translated_internal_url":"","created_at":"2024-02-06T07:57:49.213-08:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":366407,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[],"downloadable_attachments":[],"slug":"DeepSmoke_Deep_learning_model_for_smoke_detection_and_segmentation_in_outdoor_environments","translated_slug":"","page_count":null,"language":"en","content_type":"Work","summary":"Fire disaster throughout the globe causes social, environmental, and economical damage, making its early detection and instant reporting essential for saving human lives and properties. Smoke detection plays a key role in early fire detection but majority of the existing methods are limited to either indoor or outdoor surveillance environments, with poor performance for hazy scenarios. In this paper, we present a Convolutional Neural Network (CNN)-based smoke detection and segmentation framework for both clear and hazy environments. Unlike existing methods, we employ an efficient CNN architecture, termed EfficientNet, for smoke detection with better accuracy. We also segment the smoke regions using DeepLabv3+, which is supported by effective encoders and decoders along with a pixel-wise classifier for optimum localization. Our smoke detection results evince a noticeable gain up to 3% in accuracy and a decrease of 0.46% in False Alarm Rate (FAR), while segmentation reports a significant increase of 2% and 1% in global accuracy and mean Intersection over Union (IoU) scores, respectively. 
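The two-stage design described in the abstract (a frame-level smoke classifier followed by a pixel-wise segmenter) can be sketched in a few lines. The snippet below is a hypothetical illustration, not the paper's implementation: torchvision ships DeepLabv3 rather than DeepLabv3+, so it stands in for the segmenter, and the two-class setup, the `detect_and_segment` helper and the detection threshold are all assumptions made for this example.

```python
# Hypothetical sketch of the pipeline sketched in the abstract: a frame-level
# smoke/no-smoke classifier followed by pixel-wise smoke segmentation.
# torchvision's DeepLabv3 (ResNet-50 backbone) stands in for DeepLabv3+;
# the class count and threshold are illustrative assumptions.
import torch
import torch.nn as nn
from torchvision import models

# Binary smoke / no-smoke classifier built on EfficientNet-B0.
detector = models.efficientnet_b0(weights=None)
detector.classifier[1] = nn.Linear(detector.classifier[1].in_features, 2)

# Pixel-wise segmenter with 2 output classes (background, smoke);
# weights are left untrained to keep the example self-contained.
segmenter = models.segmentation.deeplabv3_resnet50(
    weights=None, weights_backbone=None, num_classes=2)

def detect_and_segment(frame: torch.Tensor, threshold: float = 0.5):
    """frame: a (1, 3, H, W) normalised image tensor."""
    detector.eval()
    segmenter.eval()
    with torch.no_grad():
        smoke_prob = detector(frame).softmax(dim=1)[0, 1].item()
        if smoke_prob < threshold:
            return smoke_prob, None          # frame flagged as smoke-free
        logits = segmenter(frame)["out"]     # (1, 2, H, W) per-pixel scores
        mask = logits.argmax(dim=1)          # (1, H, W) smoke mask
    return smoke_prob, mask
```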
Tutorials and Presentations

Belief functions: A gentle introduction

The theory of belief functions, sometimes referred to as evidence theory or Dempster-Shafer theory, was first introduced by Arthur P. Dempster in the context of statistical inference, and was later developed by Glenn Shafer into a general framework for modelling epistemic uncertainty. Belief theory and the closely related theory of random sets form natural frameworks for modelling situations in which data are missing or scarce: think of extremely rare events such as volcanic eruptions or power plant meltdowns, of problems subject to huge uncertainties due to the number and complexity of the factors involved (e.g. climate change), and of the all-important issue of generalisation from small training sets in machine learning. This tutorial is designed to introduce the principles and rationale of random sets and belief function theory to mainstream statisticians, mathematicians and working scientists; to survey the key elements of the methodology and its most recent developments; and to make practitioners aware of the set of tools that have been developed for reasoning in the belief function framework on real-world problems. Attendees will acquire first-hand knowledge of how to apply these tools to significant problems in major application fields such as computer vision and climate change. A research programme for the future of random set theory and its high-impact applications is outlined at the end.
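To make the objects the tutorial covers concrete, here is a small self-contained Python illustration (written for this text, not taken from the tutorial slides) of a mass function on a finite frame of discernment, the belief and plausibility measures it induces, and Dempster's rule of combination:

```python
# A self-contained illustration of the basic objects of belief theory:
# a mass function on a frame of discernment, its belief/plausibility,
# and Dempster's rule of combination.

def belief(m, A):
    # bel(A): total mass of focal sets B entirely contained in A
    return sum(v for B, v in m.items() if B <= A)

def plausibility(m, A):
    # pl(A): total mass of focal sets B intersecting A
    return sum(v for B, v in m.items() if B & A)

def dempster(m1, m2):
    # Dempster's rule: intersect focal sets, renormalise by 1 - conflict
    combined = {}
    for B1, v1 in m1.items():
        for B2, v2 in m2.items():
            inter = B1 & B2
            if inter:
                combined[inter] = combined.get(inter, 0.0) + v1 * v2
    k = sum(combined.values())  # mass assigned to non-conflicting pairs
    if k == 0:
        raise ValueError("total conflict: Dempster's rule is undefined")
    return {B: v / k for B, v in combined.items()}

# Frame of discernment {a, b, c}; frozensets act as focal elements.
m1 = {frozenset("a"): 0.5, frozenset("ab"): 0.3, frozenset("abc"): 0.2}
m2 = {frozenset("b"): 0.6, frozenset("abc"): 0.4}
A = frozenset("ab")
print(belief(m1, A), plausibility(m1, A))  # 0.8 1.0
print(dempster(m1, m2))
```

Note how belief and plausibility bracket any probability consistent with the evidence: bel(A) counts only mass that certainly supports A, while pl(A) counts all mass that could possibly support it.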
Towards machines that can read your mind

Artificial intelligence is becoming part of our lives. Smart cars will be on our roads in less than ten years' time; shops with no checkout, which automatically recognise customers and what they purchase, are already open for business. But to enable machines to deal with uncertainty, we must fundamentally change the way they learn from the data they observe, so that they can cope with situations they have never encountered in the safest possible way. Interacting naturally with human beings and their complex environments will only be possible if machines are able to put themselves in people's shoes: to guess their goals, beliefs and intentions – in other words, to read our minds. Fabio will explain just how machines can be provided with this mind-reading ability.
AMTnet: Action-Micro-Tube Regression by End-to-end Trainable Deep Architecture
with Suman Saha and Gurkirt Singh. ICCV 2017.

Poster presented at ICCV 2017.
Recognition","url":"https://www.academia.edu/Documents/in/Machine_Learning_and_Pattern_Recognition"},{"id":1211304,"name":"Artificial Neural Network","url":"https://www.academia.edu/Documents/in/Artificial_Neural_Network"}],"urls":[]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") }); </script> <div class="js-work-strip profile--work_container" data-work-id="33854995"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/33854995/The_statistics_of_belief_functions_Invited_talk_at_the_4th_BFAS_Summer_School_on_Belief_Functions_and_their_Applications"><img alt="Research paper thumbnail of The statistics of belief functions - Invited talk at the 4th BFAS Summer School on Belief Functions and their Applications" class="work-thumbnail" src="https://attachments.academia-assets.com/53834877/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/33854995/The_statistics_of_belief_functions_Invited_talk_at_the_4th_BFAS_Summer_School_on_Belief_Functions_and_their_Applications">The statistics of belief functions - Invited talk at the 4th BFAS Summer School on Belief Functions and their Applications</a></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">Although born within the remit of mathematical statistics, the theory of belief functions has lat...</span><a class="js-work-more-abstract" data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">Although born within the remit of mathematical statistics, the theory of belief functions has later evolved towards subjective interpretations which have distanced it from its mother field, and have drawn it nearer to artificial intelligence.<br />The purpose of this talk, in its first part, is to understanding belief theory in the context of mathematical probability and its main interpretations, Bayesian and frequentist statistics, contrasting these three methodologies according to their treatment of uncertain data.<br />In the second part we recall the existing statistical views of belief function theory, due to the work by Dempster, Almond, Hummel and Landy, Zhang and Liu, Walley and Fine, among others.<br />Finally, we outline a research programme for the development of a fully-fledged theory of statistical inference with random sets. 
In particular, we discuss the notion of generalised lower and upper likelihoods, the formulation of a framework for logistic regression with belief functions, the generalisation of the classical total probability theorem to belief functions, the formulation of parametric models based of random sets, and the development of a theory of random variables and processes in which the underlying probability space is replaced by a random set space.</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="031aa7f7b11f867c87a889e094d1595a" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":53834877,"asset_id":33854995,"asset_type":"Work","button_location":"profile"}" href="https://www.academia.edu/attachments/53834877/download_file?s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="33854995"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="33854995"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 33854995; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=33854995]").text(description); $(".js-view-count[data-work-id=33854995]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 33854995; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='33854995']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-a9bf3a2bc8c89fa2a77156577594264ee8a0f214d74241bc0fcd3f69f8d107ac.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (true){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "031aa7f7b11f867c87a889e094d1595a" } } $('.js-work-strip[data-work-id=33854995]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":33854995,"title":"The statistics of belief functions - Invited talk at the 4th BFAS Summer School on Belief Functions and their Applications","translated_title":"","metadata":{"abstract":"Although born within the remit of mathematical statistics, the theory of belief functions has later evolved towards subjective interpretations which have distanced it from its mother field, 
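As background for the "statistical inference with random sets" programme mentioned above, it may help to recall Dempster's classical multivalued-mapping construction (standard material, not taken from the talk itself): a probability measure on an underlying space, pushed through a set-valued map, induces a pair of lower and upper probabilities, which coincide with belief and plausibility.

```latex
% Dempster's construction: P on (\Omega, \mathcal{F}) and a set-valued map
% \Gamma into a frame \Theta induce lower/upper probabilities on \Theta.
\[
  \Gamma : \Omega \to 2^{\Theta} \setminus \{\emptyset\}, \qquad
  \mathrm{Bel}(A) = P\bigl(\{\omega \in \Omega : \Gamma(\omega) \subseteq A\}\bigr),
\]
\[
  \mathrm{Pl}(A) = P\bigl(\{\omega \in \Omega : \Gamma(\omega) \cap A \neq \emptyset\}\bigr)
  = 1 - \mathrm{Bel}(A^{c}).
\]
```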
and have drawn it nearer to artificial intelligence.\nThe purpose of this talk, in its first part, is to understanding belief theory in the context of mathematical probability and its main interpretations, Bayesian and frequentist statistics, contrasting these three methodologies according to their treatment of uncertain data.\nIn the second part we recall the existing statistical views of belief function theory, due to the work by Dempster, Almond, Hummel and Landy, Zhang and Liu, Walley and Fine, among others.\nFinally, we outline a research programme for the development of a fully-fledged theory of statistical inference with random sets. In particular, we discuss the notion of generalised lower and upper likelihoods, the formulation of a framework for logistic regression with belief functions, the generalisation of the classical total probability theorem to belief functions, the formulation of parametric models based of random sets, and the development of a theory of random variables and processes in which the underlying probability space is replaced by a random set space."},"translated_abstract":"Although born within the remit of mathematical statistics, the theory of belief functions has later evolved towards subjective interpretations which have distanced it from its mother field, and have drawn it nearer to artificial intelligence.\nThe purpose of this talk, in its first part, is to understanding belief theory in the context of mathematical probability and its main interpretations, Bayesian and frequentist statistics, contrasting these three methodologies according to their treatment of uncertain data.\nIn the second part we recall the existing statistical views of belief function theory, due to the work by Dempster, Almond, Hummel and Landy, Zhang and Liu, Walley and Fine, among others.\nFinally, we outline a research programme for the development of a fully-fledged theory of statistical inference with random sets. 
In particular, we discuss the notion of generalised lower and upper likelihoods, the formulation of a framework for logistic regression with belief functions, the generalisation of the classical total probability theorem to belief functions, the formulation of parametric models based of random sets, and the development of a theory of random variables and processes in which the underlying probability space is replaced by a random set space.","internal_url":"https://www.academia.edu/33854995/The_statistics_of_belief_functions_Invited_talk_at_the_4th_BFAS_Summer_School_on_Belief_Functions_and_their_Applications","translated_internal_url":"","created_at":"2017-07-12T00:59:08.415-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":366407,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[],"downloadable_attachments":[{"id":53834877,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/53834877/thumbnails/1.jpg","file_name":"BELIEF2017.pdf","download_url":"https://www.academia.edu/attachments/53834877/download_file","bulk_download_file_name":"The_statistics_of_belief_functions_Invit.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/53834877/BELIEF2017-libre.pdf?1499846489=\u0026response-content-disposition=attachment%3B+filename%3DThe_statistics_of_belief_functions_Invit.pdf\u0026Expires=1743344604\u0026Signature=WB3CuknRRwbcTQ3Es1P8Aq5cVvTEDfRdL~oX5Sf9BG~u9ehyn4V5F3Ik4MhmKEWXZWkghTZgIvMxNsyDcaPWpA7LTjvegoMEYTBdKX96j6hAfPTlMGcX3dLfYsgXgcnK0KRpn31nM3ljV-4RAbY36yWte2Gf3FioYpxD7kunjznZ7Iiq7Fk38YuSWCqPMCDJwBty02II-zL1QZlZEsYx9WCvVjFxmREzcQUKlWChIR~MPJBq4Z-IXEhwUmN~mvBXu8HQLoDqtcQmRxkJ0sGYcZ5SkY~Q3DORZvv~BjcG-jkDgks~2A83BqD25RjwjFlww0C8svEWHBvmsofQ8~AMng__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"slug":"The_statistics_of_belief_functions_Invited_talk_at_the_4th_BFAS_Summer_School_on_Belief_Functions_and_their_Applications","translated_slug":"","page_count":111,"language":"en","content_type":"Work","summary":"Although born within the remit of mathematical statistics, the theory of belief functions has later evolved towards subjective interpretations which have distanced it from its mother field, and have drawn it nearer to artificial intelligence.\nThe purpose of this talk, in its first part, is to understanding belief theory in the context of mathematical probability and its main interpretations, Bayesian and frequentist statistics, contrasting these three methodologies according to their treatment of uncertain data.\nIn the second part we recall the existing statistical views of belief function theory, due to the work by Dempster, Almond, Hummel and Landy, Zhang and Liu, Walley and Fine, among others.\nFinally, we outline a research programme for the development of a fully-fledged theory of statistical inference with random sets. 
In particular, we discuss the notion of generalised lower and upper likelihoods, the formulation of a framework for logistic regression with belief functions, the generalisation of the classical total probability theorem to belief functions, the formulation of parametric models based of random sets, and the development of a theory of random variables and processes in which the underlying probability space is replaced by a random set space.","owner":{"id":366407,"first_name":"Fabio","middle_initials":null,"last_name":"Cuzzolin","page_name":"FabioCuzzolin","domain_name":"oxfordbrookes","created_at":"2011-03-17T00:43:31.195-07:00","display_name":"Fabio Cuzzolin","url":"https://oxfordbrookes.academia.edu/FabioCuzzolin"},"attachments":[{"id":53834877,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/53834877/thumbnails/1.jpg","file_name":"BELIEF2017.pdf","download_url":"https://www.academia.edu/attachments/53834877/download_file","bulk_download_file_name":"The_statistics_of_belief_functions_Invit.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/53834877/BELIEF2017-libre.pdf?1499846489=\u0026response-content-disposition=attachment%3B+filename%3DThe_statistics_of_belief_functions_Invit.pdf\u0026Expires=1743344604\u0026Signature=WB3CuknRRwbcTQ3Es1P8Aq5cVvTEDfRdL~oX5Sf9BG~u9ehyn4V5F3Ik4MhmKEWXZWkghTZgIvMxNsyDcaPWpA7LTjvegoMEYTBdKX96j6hAfPTlMGcX3dLfYsgXgcnK0KRpn31nM3ljV-4RAbY36yWte2Gf3FioYpxD7kunjznZ7Iiq7Fk38YuSWCqPMCDJwBty02II-zL1QZlZEsYx9WCvVjFxmREzcQUKlWChIR~MPJBq4Z-IXEhwUmN~mvBXu8HQLoDqtcQmRxkJ0sGYcZ5SkY~Q3DORZvv~BjcG-jkDgks~2A83BqD25RjwjFlww0C8svEWHBvmsofQ8~AMng__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"research_interests":[{"id":344,"name":"Probability Theory","url":"https://www.academia.edu/Documents/in/Probability_Theory"},{"id":465,"name":"Artificial Intelligence","url":"https://www.academia.edu/Documents/in/Artificial_Intelligence"},{"id":892,"name":"Statistics","url":"https://www.academia.edu/Documents/in/Statistics"},{"id":6132,"name":"Soft Computing","url":"https://www.academia.edu/Documents/in/Soft_Computing"},{"id":9796,"name":"Philosophy Of Probability","url":"https://www.academia.edu/Documents/in/Philosophy_Of_Probability"},{"id":22613,"name":"Probability and statistics","url":"https://www.academia.edu/Documents/in/Probability_and_statistics"},{"id":31412,"name":"Probability and Mathematical Statistics","url":"https://www.academia.edu/Documents/in/Probability_and_Mathematical_Statistics"},{"id":33069,"name":"Probability","url":"https://www.academia.edu/Documents/in/Probability"},{"id":42558,"name":"Artifical Intelligence","url":"https://www.academia.edu/Documents/in/Artifical_Intelligence"},{"id":94223,"name":"Imprecise Probability","url":"https://www.academia.edu/Documents/in/Imprecise_Probability"},{"id":94225,"name":"Belief Functions","url":"https://www.academia.edu/Documents/in/Belief_Functions"},{"id":122987,"name":"Non-probabilistic Modeling and Imprecise Probabilities","url":"https://www.academia.edu/Documents/in/Non-probabilistic_Modeling_and_Imprecise_Probabilities"},{"id":141502,"name":"Inteligencia artificial","url":"https://www.academia.edu/Documents/in/Inteligencia_artificial-1"},{"id":303033,"name":"Belief Function","url":"https://www.academia.edu/Documents/in/Belief_Function"},{"id":343413,"name":"Applied Probability and Statistics","url":"https://www.academia.edu/Documents/in/Applied_Probability_and_Statistics"},{"id":378224,"name":"Imprecise 
probabilities","url":"https://www.academia.edu/Documents/in/Imprecise_probabilities"},{"id":378227,"name":"Imprecise Previsions","url":"https://www.academia.edu/Documents/in/Imprecise_Previsions"},{"id":571797,"name":"Introduction to Probability","url":"https://www.academia.edu/Documents/in/Introduction_to_Probability"},{"id":655694,"name":"Subjective Probability - Fuzzy Theory and Belief Functions","url":"https://www.academia.edu/Documents/in/Subjective_Probability_-_Fuzzy_Theory_and_Belief_Functions"}],"urls":[]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") if (false) { Aedu.setUpFigureCarousel('profile-work-33854995-figures'); } }); </script> <div class="js-work-strip profile--work_container" data-work-id="27126184"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/27126184/ONLINE_HUMAN_ACTION_LOCALISATION_BASED_ON_APPEARANCE_AND_MOTION_CUES"><img alt="Research paper thumbnail of ONLINE HUMAN ACTION LOCALISATION BASED ON APPEARANCE AND MOTION CUES" class="work-thumbnail" src="https://attachments.academia-assets.com/47376313/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/27126184/ONLINE_HUMAN_ACTION_LOCALISATION_BASED_ON_APPEARANCE_AND_MOTION_CUES">ONLINE HUMAN ACTION LOCALISATION BASED ON APPEARANCE AND MOTION CUES</a></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">We investigate the problem of online action localisation in videos. Our model uses appearance and...</span><a class="js-work-more-abstract" data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">We investigate the problem of online action localisation in videos. Our model uses appearance and motion cues to generate region proposals from streaming video frames. Recently, deep feature representation outperforms the handcrafted features in object classification. Driven by this progress, we model our system using deep CNN features. 
We proposed an online incremental learning framework which initially learns from a burst of streaming video frames and iteratively updates the learner by solving a set of linear SVMs (1-vs-rest) using a batch stochastic gradient descent (SGD) algorithm with hard example mining.</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="a35533f4540cda2ee12e410080baf64c" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":47376313,"asset_id":27126184,"asset_type":"Work","button_location":"profile"}" href="https://www.academia.edu/attachments/47376313/download_file?s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="27126184"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="27126184"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 27126184; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=27126184]").text(description); $(".js-view-count[data-work-id=27126184]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 27126184; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='27126184']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-a9bf3a2bc8c89fa2a77156577594264ee8a0f214d74241bc0fcd3f69f8d107ac.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (true){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "a35533f4540cda2ee12e410080baf64c" } } $('.js-work-strip[data-work-id=27126184]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":27126184,"title":"ONLINE HUMAN ACTION LOCALISATION BASED ON APPEARANCE AND MOTION CUES","translated_title":"","metadata":{"abstract":"We investigate the problem of online action localisation in videos. Our model uses appearance and motion cues to generate region proposals from streaming video frames. Recently, deep feature representation outperforms the handcrafted features in object classification. Driven by this progress, we model our system using deep CNN features. 
We proposed an online incremental learning framework which initially learns from a burst of streaming video frames and iteratively updates the learner by solving a set of linear SVMs (1-vs-rest) using a batch stochastic gradient descent (SGD) algorithm with hard example mining.","ai_title_tag":"Online Video Action Localization Using Deep Learning"},"translated_abstract":"We investigate the problem of online action localisation in videos. Our model uses appearance and motion cues to generate region proposals from streaming video frames. Recently, deep feature representation outperforms the handcrafted features in object classification. Driven by this progress, we model our system using deep CNN features. We proposed an online incremental learning framework which initially learns from a burst of streaming video frames and iteratively updates the learner by solving a set of linear SVMs (1-vs-rest) using a batch stochastic gradient descent (SGD) algorithm with hard example mining.","internal_url":"https://www.academia.edu/27126184/ONLINE_HUMAN_ACTION_LOCALISATION_BASED_ON_APPEARANCE_AND_MOTION_CUES","translated_internal_url":"","created_at":"2016-07-20T04:20:52.253-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":366407,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[{"id":22576094,"work_id":27126184,"tagging_user_id":366407,"tagged_user_id":51192593,"co_author_invite_id":5039173,"email":"s***4@brookes.ac.uk","display_order":1,"name":"Suman Saha","title":"ONLINE HUMAN ACTION LOCALISATION BASED ON APPEARANCE AND MOTION CUES"},{"id":22576095,"work_id":27126184,"tagging_user_id":366407,"tagged_user_id":35748463,"co_author_invite_id":null,"email":"m***a@eng.ox.ac.uk","display_order":2,"name":"Michael Sapienza","title":"ONLINE HUMAN ACTION LOCALISATION BASED ON APPEARANCE AND MOTION CUES"}],"downloadable_attachments":[{"id":47376313,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/47376313/thumbnails/1.jpg","file_name":"poster_1.pdf","download_url":"https://www.academia.edu/attachments/47376313/download_file","bulk_download_file_name":"ONLINE_HUMAN_ACTION_LOCALISATION_BASED_O.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/47376313/poster_1-libre.pdf?1469013993=\u0026response-content-disposition=attachment%3B+filename%3DONLINE_HUMAN_ACTION_LOCALISATION_BASED_O.pdf\u0026Expires=1743344604\u0026Signature=d2BJQ6loBl2Xn~QxZaTQxsg70Ae5cRwulEjTkHR9l8aHAUXdhybT0fGdCfSiAofNOi0m7KN0woe9o1eHdqFtohq5wkVxgF3iAE8ctSDqnTvP26br1ySdzbG5mLTY~iNZ99x5V22zcYZud3-ZCAkfa1H5OUvs1S~P4qZ7zMvwaaA66pwr78jegOahWvf3~eMoz1DmsnPKaE4SsRQvrhEZFFiNmbx~ICMf2yLLyppnn9N1nuMcnSSaZr4eUZNv4qSquT3s-dm4iGVnWQdjfnThB2vls9yKRkC-pP6FwZMgYEboDZ72XbQUVDjhmQqIdMIJqIzI8FlMORleXF7j-gy27w__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"slug":"ONLINE_HUMAN_ACTION_LOCALISATION_BASED_ON_APPEARANCE_AND_MOTION_CUES","translated_slug":"","page_count":1,"language":"en","content_type":"Work","summary":"We investigate the problem of online action localisation in videos. Our model uses appearance and motion cues to generate region proposals from streaming video frames. Recently, deep feature representation outperforms the handcrafted features in object classification. Driven by this progress, we model our system using deep CNN features. 
We proposed an online incremental learning framework which initially learns from a burst of streaming video frames and iteratively updates the learner by solving a set of linear SVMs (1-vs-rest) using a batch stochastic gradient descent (SGD) algorithm with hard example mining.","owner":{"id":366407,"first_name":"Fabio","middle_initials":null,"last_name":"Cuzzolin","page_name":"FabioCuzzolin","domain_name":"oxfordbrookes","created_at":"2011-03-17T00:43:31.195-07:00","display_name":"Fabio Cuzzolin","url":"https://oxfordbrookes.academia.edu/FabioCuzzolin"},"attachments":[{"id":47376313,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/47376313/thumbnails/1.jpg","file_name":"poster_1.pdf","download_url":"https://www.academia.edu/attachments/47376313/download_file","bulk_download_file_name":"ONLINE_HUMAN_ACTION_LOCALISATION_BASED_O.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/47376313/poster_1-libre.pdf?1469013993=\u0026response-content-disposition=attachment%3B+filename%3DONLINE_HUMAN_ACTION_LOCALISATION_BASED_O.pdf\u0026Expires=1743344604\u0026Signature=d2BJQ6loBl2Xn~QxZaTQxsg70Ae5cRwulEjTkHR9l8aHAUXdhybT0fGdCfSiAofNOi0m7KN0woe9o1eHdqFtohq5wkVxgF3iAE8ctSDqnTvP26br1ySdzbG5mLTY~iNZ99x5V22zcYZud3-ZCAkfa1H5OUvs1S~P4qZ7zMvwaaA66pwr78jegOahWvf3~eMoz1DmsnPKaE4SsRQvrhEZFFiNmbx~ICMf2yLLyppnn9N1nuMcnSSaZr4eUZNv4qSquT3s-dm4iGVnWQdjfnThB2vls9yKRkC-pP6FwZMgYEboDZ72XbQUVDjhmQqIdMIJqIzI8FlMORleXF7j-gy27w__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"research_interests":[{"id":854,"name":"Computer Vision","url":"https://www.academia.edu/Documents/in/Computer_Vision"},{"id":1185,"name":"Image Processing","url":"https://www.academia.edu/Documents/in/Image_Processing"},{"id":2008,"name":"Machine Learning","url":"https://www.academia.edu/Documents/in/Machine_Learning"},{"id":10005,"name":"Applications of Machine Learning","url":"https://www.academia.edu/Documents/in/Applications_of_Machine_Learning"},{"id":15665,"name":"Video Processing","url":"https://www.academia.edu/Documents/in/Video_Processing"},{"id":17701,"name":"Gesture Recognition","url":"https://www.academia.edu/Documents/in/Gesture_Recognition"},{"id":81182,"name":"Deep Learning","url":"https://www.academia.edu/Documents/in/Deep_Learning"},{"id":90270,"name":"Action Recognition","url":"https://www.academia.edu/Documents/in/Action_Recognition"},{"id":315678,"name":"Human Action Recognition","url":"https://www.academia.edu/Documents/in/Human_Action_Recognition"}],"urls":[]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") if (false) { Aedu.setUpFigureCarousel('profile-work-27126184-figures'); } }); </script> <div class="js-work-strip profile--work_container" data-work-id="27126098"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/27126098/DEEP_LEARNING_FOR_DETECTING_MULTIPLE_SPACE_TIME_ACTION_TUBES_IN_VIDEOS"><img alt="Research paper thumbnail of DEEP LEARNING FOR DETECTING MULTIPLE SPACE-TIME ACTION TUBES IN VIDEOS" class="work-thumbnail" src="https://attachments.academia-assets.com/47376237/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" 
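The updating scheme in this abstract (1-vs-rest linear SVMs refreshed by mini-batch SGD with hard example mining) can be sketched in a few lines. This is one plausible reading, not the authors' code: the class name, batch size and mining fraction are made-up, and CNN features for the region proposals are assumed to be computed elsewhere. It uses scikit-learn's SGDClassifier, whose hinge loss gives a linear SVM trained by SGD.

import numpy as np
from sklearn.linear_model import SGDClassifier

class IncrementalOvRSVM:
    def __init__(self, n_classes, batch_size=256, hard_frac=0.5):
        # one binary hinge-loss (linear SVM) learner per action class
        self.svms = [SGDClassifier(loss="hinge", alpha=1e-4) for _ in range(n_classes)]
        self.batch_size = batch_size
        self.hard_frac = hard_frac

    def update(self, feats, labels):
        """feats: (N, D) CNN features of proposals; labels: (N,) class ids."""
        for c, svm in enumerate(self.svms):
            y = np.where(labels == c, 1, -1)
            if not hasattr(svm, "coef_"):
                # initial burst of frames: plain first fit
                svm.partial_fit(feats, y, classes=np.array([-1, 1]))
                continue
            # hard example mining: keep the examples with the largest hinge loss
            margins = y * svm.decision_function(feats)
            hinge = np.maximum(0.0, 1.0 - margins)
            k = max(1, int(self.hard_frac * len(feats)))
            hard = np.argsort(-hinge)[:k]
            # mini-batch SGD updates on the mined examples
            for start in range(0, k, self.batch_size):
                idx = hard[start:start + self.batch_size]
                svm.partial_fit(feats[idx], y[idx])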
href="https://www.academia.edu/27126098/DEEP_LEARNING_FOR_DETECTING_MULTIPLE_SPACE_TIME_ACTION_TUBES_IN_VIDEOS">DEEP LEARNING FOR DETECTING MULTIPLE SPACE-TIME ACTION TUBES IN VIDEOS</a></div><div class="wp-workCard_item wp-workCard--coauthors"><span>by </span><span><a class="" data-click-track="profile-work-strip-authors" href="https://oxfordbrookes.academia.edu/FabioCuzzolin">Fabio Cuzzolin</a>, <a class="" data-click-track="profile-work-strip-authors" href="https://independent.academia.edu/SumanSaha35">Suman Saha</a>, and <a class="" data-click-track="profile-work-strip-authors" href="https://independent.academia.edu/GurkirtSingh">Gurkirt Singh</a></span></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">In this work we propose a new approach to the spatiotemporal localisation (detection) and classif...</span><a class="js-work-more-abstract" data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">In this work we propose a new approach<br />to the spatiotemporal localisation (detection)<br />and classification of multiple concurrent actions<br />within temporally untrimmed videos. Our<br />framework is composed of three stages. In stage<br />1, a cascade of deep region proposal and detection<br />networks are employed to classify regions<br />of each video frame potentially containing an<br />action of interest. In stage 2, appearance and<br />motion cues are combined by merging the detection<br />boxes and softmax classification scores<br />generated by the two cascades. In stage 3, sequences<br />of detection boxes most likely to be associated<br />with a single action instance, called action<br />tubes, are constructed by solving two optimisation<br />problems via dynamic programming.</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="113335a761e33f9d1b467ebe1f2b58c2" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":47376237,"asset_id":27126098,"asset_type":"Work","button_location":"profile"}" href="https://www.academia.edu/attachments/47376237/download_file?s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="27126098"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="27126098"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 27126098; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=27126098]").text(description); $(".js-view-count[data-work-id=27126098]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 27126098; 
window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='27126098']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-a9bf3a2bc8c89fa2a77156577594264ee8a0f214d74241bc0fcd3f69f8d107ac.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (true){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "113335a761e33f9d1b467ebe1f2b58c2" } } $('.js-work-strip[data-work-id=27126098]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":27126098,"title":"DEEP LEARNING FOR DETECTING MULTIPLE SPACE-TIME ACTION TUBES IN VIDEOS","translated_title":"","metadata":{"abstract":"In this work we propose a new approach\nto the spatiotemporal localisation (detection)\nand classification of multiple concurrent actions\nwithin temporally untrimmed videos. Our\nframework is composed of three stages. In stage\n1, a cascade of deep region proposal and detection\nnetworks are employed to classify regions\nof each video frame potentially containing an\naction of interest. In stage 2, appearance and\nmotion cues are combined by merging the detection\nboxes and softmax classification scores\ngenerated by the two cascades. In stage 3, sequences\nof detection boxes most likely to be associated\nwith a single action instance, called action\ntubes, are constructed by solving two optimisation\nproblems via dynamic programming."},"translated_abstract":"In this work we propose a new approach\nto the spatiotemporal localisation (detection)\nand classification of multiple concurrent actions\nwithin temporally untrimmed videos. Our\nframework is composed of three stages. In stage\n1, a cascade of deep region proposal and detection\nnetworks are employed to classify regions\nof each video frame potentially containing an\naction of interest. In stage 2, appearance and\nmotion cues are combined by merging the detection\nboxes and softmax classification scores\ngenerated by the two cascades. 
In stage 3, sequences\nof detection boxes most likely to be associated\nwith a single action instance, called action\ntubes, are constructed by solving two optimisation\nproblems via dynamic programming.","internal_url":"https://www.academia.edu/27126098/DEEP_LEARNING_FOR_DETECTING_MULTIPLE_SPACE_TIME_ACTION_TUBES_IN_VIDEOS","translated_internal_url":"","created_at":"2016-07-20T04:17:18.928-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":366407,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[{"id":22576030,"work_id":27126098,"tagging_user_id":366407,"tagged_user_id":51192593,"co_author_invite_id":5039173,"email":"s***4@brookes.ac.uk","display_order":1,"name":"Suman Saha","title":"DEEP LEARNING FOR DETECTING MULTIPLE SPACE-TIME ACTION TUBES IN VIDEOS"},{"id":22576031,"work_id":27126098,"tagging_user_id":366407,"tagged_user_id":4032373,"co_author_invite_id":null,"email":"g***4@gmail.com","display_order":2,"name":"Gurkirt Singh","title":"DEEP LEARNING FOR DETECTING MULTIPLE SPACE-TIME ACTION TUBES IN VIDEOS"},{"id":22576032,"work_id":27126098,"tagging_user_id":366407,"tagged_user_id":312333,"co_author_invite_id":null,"email":"p***r@hotmail.com","affiliation":"Oxford Brookes University","display_order":3,"name":"philip torr","title":"DEEP LEARNING FOR DETECTING MULTIPLE SPACE-TIME ACTION TUBES IN VIDEOS"},{"id":22576033,"work_id":27126098,"tagging_user_id":366407,"tagged_user_id":35748463,"co_author_invite_id":null,"email":"m***a@eng.ox.ac.uk","display_order":4,"name":"Michael Sapienza","title":"DEEP LEARNING FOR DETECTING MULTIPLE SPACE-TIME ACTION TUBES IN VIDEOS"}],"downloadable_attachments":[{"id":47376237,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/47376237/thumbnails/1.jpg","file_name":"poster_2.pdf","download_url":"https://www.academia.edu/attachments/47376237/download_file","bulk_download_file_name":"DEEP_LEARNING_FOR_DETECTING_MULTIPLE_SPA.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/47376237/poster_2-libre.pdf?1469013582=\u0026response-content-disposition=attachment%3B+filename%3DDEEP_LEARNING_FOR_DETECTING_MULTIPLE_SPA.pdf\u0026Expires=1743344604\u0026Signature=QJ5jHwHjDoCf6FXJz4TU-AesGbEd-1KSWNTXV5MyiCp5sAIp0PfWv6Xr9XBO7Jgb8DvbrCB6ghORZDzIuhwluQGwWTIRvVfQryvixtoOBSxrZBq~oG-N2Uq52RgLoaJghlsZqlzGyEyq3~znbSlY-SKqbk2kTKr39wt3lXGx35nE37p~rzDsajF9YP-XKQGoZsoFnouFji6HyCxJdxfVXrjsBcixyrgNNk65LmKGBIxp-M2a-CSVZH8D5VIoTjEX4WgtmhZjcgqdMcrOMBZ6br7J4XL-GVRMDbqrCf5XdwLHQJDfvmYjCDxHT37s0zNyJW-uyyWhqH92TwA1saVOeg__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"slug":"DEEP_LEARNING_FOR_DETECTING_MULTIPLE_SPACE_TIME_ACTION_TUBES_IN_VIDEOS","translated_slug":"","page_count":1,"language":"en","content_type":"Work","summary":"In this work we propose a new approach\nto the spatiotemporal localisation (detection)\nand classification of multiple concurrent actions\nwithin temporally untrimmed videos. Our\nframework is composed of three stages. In stage\n1, a cascade of deep region proposal and detection\nnetworks are employed to classify regions\nof each video frame potentially containing an\naction of interest. In stage 2, appearance and\nmotion cues are combined by merging the detection\nboxes and softmax classification scores\ngenerated by the two cascades. 
In stage 3, sequences\nof detection boxes most likely to be associated\nwith a single action instance, called action\ntubes, are constructed by solving two optimisation\nproblems via dynamic programming.","owner":{"id":366407,"first_name":"Fabio","middle_initials":null,"last_name":"Cuzzolin","page_name":"FabioCuzzolin","domain_name":"oxfordbrookes","created_at":"2011-03-17T00:43:31.195-07:00","display_name":"Fabio Cuzzolin","url":"https://oxfordbrookes.academia.edu/FabioCuzzolin"},"attachments":[{"id":47376237,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/47376237/thumbnails/1.jpg","file_name":"poster_2.pdf","download_url":"https://www.academia.edu/attachments/47376237/download_file","bulk_download_file_name":"DEEP_LEARNING_FOR_DETECTING_MULTIPLE_SPA.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/47376237/poster_2-libre.pdf?1469013582=\u0026response-content-disposition=attachment%3B+filename%3DDEEP_LEARNING_FOR_DETECTING_MULTIPLE_SPA.pdf\u0026Expires=1743344604\u0026Signature=QJ5jHwHjDoCf6FXJz4TU-AesGbEd-1KSWNTXV5MyiCp5sAIp0PfWv6Xr9XBO7Jgb8DvbrCB6ghORZDzIuhwluQGwWTIRvVfQryvixtoOBSxrZBq~oG-N2Uq52RgLoaJghlsZqlzGyEyq3~znbSlY-SKqbk2kTKr39wt3lXGx35nE37p~rzDsajF9YP-XKQGoZsoFnouFji6HyCxJdxfVXrjsBcixyrgNNk65LmKGBIxp-M2a-CSVZH8D5VIoTjEX4WgtmhZjcgqdMcrOMBZ6br7J4XL-GVRMDbqrCf5XdwLHQJDfvmYjCDxHT37s0zNyJW-uyyWhqH92TwA1saVOeg__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"research_interests":[{"id":854,"name":"Computer Vision","url":"https://www.academia.edu/Documents/in/Computer_Vision"},{"id":1185,"name":"Image Processing","url":"https://www.academia.edu/Documents/in/Image_Processing"},{"id":2008,"name":"Machine Learning","url":"https://www.academia.edu/Documents/in/Machine_Learning"},{"id":4095,"name":"Classification (Machine Learning)","url":"https://www.academia.edu/Documents/in/Classification_Machine_Learning_"},{"id":15665,"name":"Video Processing","url":"https://www.academia.edu/Documents/in/Video_Processing"},{"id":17701,"name":"Gesture Recognition","url":"https://www.academia.edu/Documents/in/Gesture_Recognition"},{"id":81182,"name":"Deep Learning","url":"https://www.academia.edu/Documents/in/Deep_Learning"},{"id":90270,"name":"Action Recognition","url":"https://www.academia.edu/Documents/in/Action_Recognition"},{"id":315678,"name":"Human Action Recognition","url":"https://www.academia.edu/Documents/in/Human_Action_Recognition"}],"urls":[]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") if (false) { Aedu.setUpFigureCarousel('profile-work-27126098-figures'); } }); </script> <div class="js-work-strip profile--work_container" data-work-id="27105432"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/27105432/Belief_functions_Random_sets_for_the_working_scientist_A_IJCAI_2016_Tutorial"><img alt="Research paper thumbnail of Belief functions (Random sets) for the working scientist - A IJCAI 2016 Tutorial" class="work-thumbnail" src="https://attachments.academia-assets.com/47356290/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/27105432/Belief_functions_Random_sets_for_the_working_scientist_A_IJCAI_2016_Tutorial">Belief 
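Stage 3 is the step a short example can make concrete: linking per-frame detections into an action tube by dynamic programming. The sketch below shows one standard way to pose such a problem, a Viterbi pass maximising detection score plus temporal overlap. The IoU pairwise term and the lam trade-off are illustrative choices, not taken from the paper, which solves two optimisation problems rather than this single one.

import numpy as np

def iou(a, b):
    # boxes as (x1, y1, x2, y2)
    ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
    ix2, iy2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0, ix2 - ix1) * max(0, iy2 - iy1)
    area = lambda r: (r[2] - r[0]) * (r[3] - r[1])
    return inter / float(area(a) + area(b) - inter + 1e-9)

def link_tube(boxes, scores, lam=1.0):
    """boxes: list over frames of (n_t, 4) arrays; scores: list of (n_t,) arrays.
    Returns one box index per frame, tracing the best-scoring action tube."""
    T = len(boxes)
    dp = [scores[0]]     # best cumulative score ending at each box
    back = []            # backpointers for each frame transition
    for t in range(1, T):
        # pairwise overlap between every previous box p and current box q
        pair = np.array([[iou(p, q) for p in boxes[t - 1]] for q in boxes[t]])
        cand = dp[t - 1][None, :] + lam * pair      # shape (n_t, n_{t-1})
        back.append(cand.argmax(axis=1))
        dp.append(scores[t] + cand.max(axis=1))
    # trace the best path backwards through the backpointers
    path = [int(dp[-1].argmax())]
    for t in range(T - 2, -1, -1):
        path.append(int(back[t][path[-1]]))
    return path[::-1]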
Belief functions (Random sets) for the working scientist - An IJCAI 2016 Tutorial
https://www.academia.edu/27105432/Belief_functions_Random_sets_for_the_working_scientist_A_IJCAI_2016_Tutorial

This half-day tutorial on belief functions (random sets) for the working scientist was presented on July 9th, 2016 at the International Joint Conference on Artificial Intelligence (IJCAI-16). The tutorial is very comprehensive (468 slides), covering:

(i) a review of mathematical probability and its interpretations (Bayesian and frequentist);
(ii) the rationale for going beyond standard probability: it's all about the data!
(iii) the basic notions of the theory of belief functions;
(iv) reasoning with belief functions: inference, combination/conditioning, graphical models, decision making;
(v) using belief functions for classification, regression, estimation, etc.;
(vi) dealing with computational issues and extending belief measures to real numbers;
(vii) the main frameworks derived from belief theory, and its relationship with other theories of uncertainty;
(viii) a number of example applications;
(ix) new horizons: limit theorems for random sets, generalised likelihoods and logistic regression for rare-event estimation, climate change modelling, and new foundations for machine learning based on random set theory and a geometry of uncertainty.

Tutorial slides are downloadable at http://cms.brookes.ac.uk/staff/FabioCuzzolin/files/IJCAI2016.pdf
PDF: https://www.academia.edu/attachments/47356290/download_file (IJCAI2016.pdf)
Aedu.setUpFigureCarousel('profile-work-27105432-figures'); } }); </script> <div class="js-work-strip profile--work_container" data-work-id="16450809"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/16450809/THE_GEOMETRY_OF_UNCERTAINTY"><img alt="Research paper thumbnail of THE GEOMETRY OF UNCERTAINTY" class="work-thumbnail" src="https://attachments.academia-assets.com/38999332/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/16450809/THE_GEOMETRY_OF_UNCERTAINTY">THE GEOMETRY OF UNCERTAINTY</a></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">WHY A MATHEMATICS OF UNCERTAINTY? - probabilities do not represent well ignorance and lack of da...</span><a class="js-work-more-abstract" data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">WHY A MATHEMATICS OF UNCERTAINTY?<br /><br />- probabilities do not represent well ignorance and lack of data;<br />- evidence is normally limited, rather than infinite as assumed by (frequentist) probability;<br />- expert knowledge needs often to be combined with hard evidence;<br />- in extreme cases (rare events or far-future predictions) very little data;<br />- bottom line: not enough evidence to determine the actual probability describing the problem.</span></div><div class="wp-workCard_item"><div class="carousel-container carousel-container--sm" id="profile-work-16450809-figures"><div class="prev-slide-container js-prev-button-container"><button aria-label="Previous" class="carousel-navigation-button js-profile-work-16450809-figures-prev"><span class="material-symbols-outlined" style="font-size: 24px" translate="no">arrow_back_ios</span></button></div><div class="slides-container js-slides-container"><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/68794/figure-1-the-geometry-of-uncertainty"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_001.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/68803/figure-2-the-geometry-of-uncertainty"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_002.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/68808/figure-3-the-geometry-of-uncertainty"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_003.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/68814/figure-4-the-geometry-of-uncertainty"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_004.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/68822/figure-5-the-geometry-of-uncertainty"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_005.jpg" /></a></figure><figure class="figure-slide-container"><a 
href="https://www.academia.edu/figures/68834/figure-6-the-geometry-of-uncertainty"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_006.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/68843/figure-7-the-geometry-of-uncertainty"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_007.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/68853/figure-8-the-geometry-of-uncertainty"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_008.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/68863/figure-9-the-geometry-of-uncertainty"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_009.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/68870/figure-10-the-geometry-of-uncertainty"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_010.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/68883/figure-11-the-geometry-of-uncertainty"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_011.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/68890/figure-12-the-geometry-of-uncertainty"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_012.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/68901/figure-13-the-geometry-of-uncertainty"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_013.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/68904/figure-14-the-geometry-of-uncertainty"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_014.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/68912/figure-15-the-geometry-of-uncertainty"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_015.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/68921/figure-16-focus-of-pair-of-simplices"><img alt="Focus of a pair of simplices " class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_016.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/68930/figure-17-the-geometry-of-uncertainty"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_017.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/68935/figure-18-the-geometry-of-uncertainty"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_018.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/68946/figure-19-the-geometry-of-uncertainty"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_019.jpg" /></a></figure><figure class="figure-slide-container"><a 
href="https://www.academia.edu/figures/68949/figure-20-the-geometry-of-uncertainty"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_020.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/68955/figure-21-the-geometry-of-uncertainty"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_021.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/68960/figure-22-the-geometry-of-uncertainty"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_022.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/68969/figure-23-the-geometry-of-uncertainty"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_023.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/68975/figure-24-the-geometry-of-uncertainty"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_024.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/68979/figure-25-the-geometry-of-uncertainty"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_025.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/68988/figure-26-the-geometry-of-uncertainty"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_026.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/68991/figure-27-the-geometry-of-uncertainty"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_027.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/68997/figure-28-the-geometry-of-uncertainty"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_028.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/69003/figure-29-the-geometry-of-uncertainty"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_029.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/69013/figure-30-the-geometry-of-uncertainty"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_030.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/69019/figure-31-the-geometry-of-uncertainty"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_031.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/69027/figure-32-the-geometry-of-uncertainty"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_032.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/69034/figure-33-the-geometry-of-uncertainty"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_033.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/69041/figure-34-the-geometry-of-uncertainty"><img alt="" 
class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_034.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/69047/figure-35-the-geometry-of-uncertainty"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_035.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/69052/figure-36-the-geometry-of-uncertainty"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_036.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/69058/figure-37-the-geometry-of-uncertainty"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_037.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/69063/figure-38-the-geometry-of-uncertainty"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_038.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/69066/figure-39-the-geometry-of-uncertainty"><img alt="www.izfp.fraunhofer.de/de/Presse/Downloads.html " class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_039.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/69072/figure-40-the-geometry-of-uncertainty"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_040.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/69080/figure-41-the-geometry-of-uncertainty"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_041.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/69087/figure-42-the-geometry-of-uncertainty"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_042.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/69094/figure-43-the-geometry-of-uncertainty"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_043.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/69102/figure-44-the-geometry-of-uncertainty"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_044.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/69109/figure-45-the-geometry-of-uncertainty"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/38999332/figure_045.jpg" /></a></figure></div><div class="next-slide-container js-next-button-container"><button aria-label="Next" class="carousel-navigation-button js-profile-work-16450809-figures-next"><span class="material-symbols-outlined" style="font-size: 24px" translate="no">arrow_forward_ios</span></button></div></div></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="0925b2c62854193b4bcd545f23430cf5" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":38999332,"asset_id":16450809,"asset_type":"Work","button_location":"profile"}" 
href="https://www.academia.edu/attachments/38999332/download_file?s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="16450809"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="16450809"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 16450809; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=16450809]").text(description); $(".js-view-count[data-work-id=16450809]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 16450809; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='16450809']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-a9bf3a2bc8c89fa2a77156577594264ee8a0f214d74241bc0fcd3f69f8d107ac.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (true){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "0925b2c62854193b4bcd545f23430cf5" } } $('.js-work-strip[data-work-id=16450809]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":16450809,"title":"THE GEOMETRY OF UNCERTAINTY","translated_title":"","metadata":{"abstract":"WHY A MATHEMATICS OF UNCERTAINTY?\n\n- probabilities do not represent well ignorance and lack of data;\n- evidence is normally limited, rather than infinite as assumed by (frequentist) probability;\n- expert knowledge needs often to be combined with hard evidence;\n- in extreme cases (rare events or far-future predictions) very little data;\n- bottom line: not enough evidence to determine the actual probability describing the problem.","ai_title_tag":"Rethinking Probability: Addressing Uncertainty in Mathematics"},"translated_abstract":"WHY A MATHEMATICS OF UNCERTAINTY?\n\n- probabilities do not represent well ignorance and lack of data;\n- evidence is normally limited, rather than infinite as assumed by (frequentist) probability;\n- expert knowledge needs often to be combined with hard evidence;\n- in extreme cases (rare events or far-future predictions) very little data;\n- bottom line: not enough evidence to determine the actual probability describing the 
problem.","internal_url":"https://www.academia.edu/16450809/THE_GEOMETRY_OF_UNCERTAINTY","translated_internal_url":"","created_at":"2015-10-04T09:26:07.364-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":366407,"coauthors_can_edit":true,"document_type":"other","co_author_tags":[],"downloadable_attachments":[{"id":38999332,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/38999332/thumbnails/1.jpg","file_name":"ulster.pdf","download_url":"https://www.academia.edu/attachments/38999332/download_file","bulk_download_file_name":"THE_GEOMETRY_OF_UNCERTAINTY.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/38999332/ulster-libre.pdf?1443975814=\u0026response-content-disposition=attachment%3B+filename%3DTHE_GEOMETRY_OF_UNCERTAINTY.pdf\u0026Expires=1743344604\u0026Signature=IEavLH9EeN--I-Fx4yd~U9UYA3rnPB8zWS9DsbCgUcjpacQR57dFrEwZbJry2iU3vlv542El34zItjBd-N5oz1aHrtvosSVSyOcwGa4ilQcU-CLPL3UbzP3i1TsNJZlkKjxF3JNnwW3MGu76yN8CWK5fhbxodekshRpQXTNheeiMv7-duNb7MFT373uco-8wZERh5xxFgH~Qm7bqNLdO5wlta0pjFVJ4s3qZd9Fg4kE63nXa83cnewy3dtUgcBzTZx0PN58J~XJ5qGk6g8jDl1xifuqDXZkeN393F2hBKVFGggl8p7M2poQwYwIoN-6PArPBrgfwa9napWIIAa7qhA__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"slug":"THE_GEOMETRY_OF_UNCERTAINTY","translated_slug":"","page_count":80,"language":"en","content_type":"Work","summary":"WHY A MATHEMATICS OF UNCERTAINTY?\n\n- probabilities do not represent well ignorance and lack of data;\n- evidence is normally limited, rather than infinite as assumed by (frequentist) probability;\n- expert knowledge needs often to be combined with hard evidence;\n- in extreme cases (rare events or far-future predictions) very little data;\n- bottom line: not enough evidence to determine the actual probability describing the problem.","owner":{"id":366407,"first_name":"Fabio","middle_initials":null,"last_name":"Cuzzolin","page_name":"FabioCuzzolin","domain_name":"oxfordbrookes","created_at":"2011-03-17T00:43:31.195-07:00","display_name":"Fabio Cuzzolin","url":"https://oxfordbrookes.academia.edu/FabioCuzzolin"},"attachments":[{"id":38999332,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/38999332/thumbnails/1.jpg","file_name":"ulster.pdf","download_url":"https://www.academia.edu/attachments/38999332/download_file","bulk_download_file_name":"THE_GEOMETRY_OF_UNCERTAINTY.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/38999332/ulster-libre.pdf?1443975814=\u0026response-content-disposition=attachment%3B+filename%3DTHE_GEOMETRY_OF_UNCERTAINTY.pdf\u0026Expires=1743344604\u0026Signature=IEavLH9EeN--I-Fx4yd~U9UYA3rnPB8zWS9DsbCgUcjpacQR57dFrEwZbJry2iU3vlv542El34zItjBd-N5oz1aHrtvosSVSyOcwGa4ilQcU-CLPL3UbzP3i1TsNJZlkKjxF3JNnwW3MGu76yN8CWK5fhbxodekshRpQXTNheeiMv7-duNb7MFT373uco-8wZERh5xxFgH~Qm7bqNLdO5wlta0pjFVJ4s3qZd9Fg4kE63nXa83cnewy3dtUgcBzTZx0PN58J~XJ5qGk6g8jDl1xifuqDXZkeN393F2hBKVFGggl8p7M2poQwYwIoN-6PArPBrgfwa9napWIIAa7qhA__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"research_interests":[{"id":344,"name":"Probability Theory","url":"https://www.academia.edu/Documents/in/Probability_Theory"},{"id":358,"name":"Convex Geometry","url":"https://www.academia.edu/Documents/in/Convex_Geometry"},{"id":892,"name":"Statistics","url":"https://www.academia.edu/Documents/in/Statistics"},{"id":4060,"name":"Applied 
Statistics","url":"https://www.academia.edu/Documents/in/Applied_Statistics"},{"id":5436,"name":"Combinatorics","url":"https://www.academia.edu/Documents/in/Combinatorics"},{"id":6404,"name":"Reasoning about Uncertainty","url":"https://www.academia.edu/Documents/in/Reasoning_about_Uncertainty"},{"id":13000,"name":"Dempster-Shafer Analysis","url":"https://www.academia.edu/Documents/in/Dempster-Shafer_Analysis"},{"id":16097,"name":"Decision Making Under Uncertainty","url":"https://www.academia.edu/Documents/in/Decision_Making_Under_Uncertainty"},{"id":22613,"name":"Probability and statistics","url":"https://www.academia.edu/Documents/in/Probability_and_statistics"},{"id":31412,"name":"Probability and Mathematical Statistics","url":"https://www.academia.edu/Documents/in/Probability_and_Mathematical_Statistics"},{"id":33069,"name":"Probability","url":"https://www.academia.edu/Documents/in/Probability"},{"id":54418,"name":"Geometry","url":"https://www.academia.edu/Documents/in/Geometry"},{"id":61603,"name":"Uncertainty","url":"https://www.academia.edu/Documents/in/Uncertainty"},{"id":94223,"name":"Imprecise Probability","url":"https://www.academia.edu/Documents/in/Imprecise_Probability"},{"id":94225,"name":"Belief Functions","url":"https://www.academia.edu/Documents/in/Belief_Functions"},{"id":122987,"name":"Non-probabilistic Modeling and Imprecise Probabilities","url":"https://www.academia.edu/Documents/in/Non-probabilistic_Modeling_and_Imprecise_Probabilities"},{"id":290552,"name":"Uncertainty analysis","url":"https://www.academia.edu/Documents/in/Uncertainty_analysis"},{"id":304534,"name":"Dempster-Shafer Theory of Evidence","url":"https://www.academia.edu/Documents/in/Dempster-Shafer_Theory_of_Evidence"},{"id":571797,"name":"Introduction to Probability","url":"https://www.academia.edu/Documents/in/Introduction_to_Probability"},{"id":655694,"name":"Subjective Probability - Fuzzy Theory and Belief Functions","url":"https://www.academia.edu/Documents/in/Subjective_Probability_-_Fuzzy_Theory_and_Belief_Functions"},{"id":1005286,"name":"Dempster Shafer Theory","url":"https://www.academia.edu/Documents/in/Dempster_Shafer_Theory"}],"urls":[]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") if (true) { Aedu.setUpFigureCarousel('profile-work-16450809-figures'); } }); </script> <div class="js-work-strip profile--work_container" data-work-id="16449528"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/16449528/UAI_2015_Tutorial_Belief_Functions_for_the_Working_Scientist"><img alt="Research paper thumbnail of UAI 2015 Tutorial - Belief Functions for the Working Scientist" class="work-thumbnail" src="https://attachments.academia-assets.com/38998701/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/16449528/UAI_2015_Tutorial_Belief_Functions_for_the_Working_Scientist">UAI 2015 Tutorial - Belief Functions for the Working Scientist</a></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">The theory of belief functions, sometimes referred to as evidence theory or Dempster-Shafer theor...</span><a class="js-work-more-abstract" 
data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">The theory of belief functions, sometimes referred to as evidence theory or Dempster-Shafer theory, was first introduced by Arthur P. Dempster in the context of statistical inference, to be later developed by Glenn Shafer as a general framework for modelling epistemic uncertainty. The methodology is now well established as a general framework for reasoning with uncertainty, with well-understood connections to related frameworks such as probability, possibility, random set and imprecise probability theories. Importantly, in recent years the number of papers published on the theory and application of belief functions has been booming (reaching over 800 in 2014 alone), displaying strong growth in particular in the East Asian community and among practitioners working on multi-criteria decision making, earth sciences, and sensor fusion. Belief functions are a natural tool to cope with heavy uncertainty, lack of evidence and missing data, and extremely rare events.<br /><br />An early debate on the rationale of belief functions gave a strong contribution to the growth and success of the UAI community and series of conference in the Eighties and Nineties, thanks to the contribution of scientists of the caliber of Glenn Shafer, Judea Pearl, Philippe Smets and Prakash Shenoy, among others. Ever since the UAI and BELIEF community have somewhat diverged, and the proposers’ effort has been recently directed towards going back to a closer relationships and exchange of ideas between the two communities. This was one of the aims of the recent BELIEF 2014 International Conference of which the proposers were General Chair and member of the Steering Committee, respectively. 
A number of books are being published on the subject as we speak, and the impact of the belief function approach to uncertainty is growing.<br /><br />The tutorial aims at bridging the gap between researchers in the field and the wider AI and Uncertainty Theory community, with the longer term goal of a more fruitful collaboration and dissemination of ideas.</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="e1383b97dccd3c43992870fbb941542f" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":38998701,"asset_id":16449528,"asset_type":"Work","button_location":"profile"}" href="https://www.academia.edu/attachments/38998701/download_file?s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="16449528"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="16449528"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 16449528; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=16449528]").text(description); $(".js-view-count[data-work-id=16449528]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 16449528; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='16449528']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-a9bf3a2bc8c89fa2a77156577594264ee8a0f214d74241bc0fcd3f69f8d107ac.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (true){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "e1383b97dccd3c43992870fbb941542f" } } $('.js-work-strip[data-work-id=16449528]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":16449528,"title":"UAI 2015 Tutorial - Belief Functions for the Working Scientist","translated_title":"","metadata":{"abstract":"The theory of belief functions, sometimes referred to as evidence theory or Dempster-Shafer theory, was first introduced by Arthur P. Dempster in the context of statistical inference, to be later developed by Glenn Shafer as a general framework for modelling epistemic uncertainty. 
The methodology is now well established as a general framework for reasoning with uncertainty, with well-understood connections to related frameworks such as probability, possibility, random set and imprecise probability theories. Importantly, in recent years the number of papers published on the theory and application of belief functions has been booming (reaching over 800 in 2014 alone), displaying strong growth in particular in the East Asian community and among practitioners working on multi-criteria decision making, earth sciences, and sensor fusion. Belief functions are a natural tool to cope with heavy uncertainty, lack of evidence and missing data, and extremely rare events.\n\nAn early debate on the rationale of belief functions gave a strong contribution to the growth and success of the UAI community and series of conference in the Eighties and Nineties, thanks to the contribution of scientists of the caliber of Glenn Shafer, Judea Pearl, Philippe Smets and Prakash Shenoy, among others. Ever since the UAI and BELIEF community have somewhat diverged, and the proposers’ effort has been recently directed towards going back to a closer relationships and exchange of ideas between the two communities. This was one of the aims of the recent BELIEF 2014 International Conference of which the proposers were General Chair and member of the Steering Committee, respectively. A number of books are being published on the subject as we speak, and the impact of the belief function approach to uncertainty is growing.\n\nThe tutorial aims at bridging the gap between researchers in the field and the wider AI and Uncertainty Theory community, with the longer term goal of a more fruitful collaboration and dissemination of ideas."},"translated_abstract":"The theory of belief functions, sometimes referred to as evidence theory or Dempster-Shafer theory, was first introduced by Arthur P. Dempster in the context of statistical inference, to be later developed by Glenn Shafer as a general framework for modelling epistemic uncertainty. The methodology is now well established as a general framework for reasoning with uncertainty, with well-understood connections to related frameworks such as probability, possibility, random set and imprecise probability theories. Importantly, in recent years the number of papers published on the theory and application of belief functions has been booming (reaching over 800 in 2014 alone), displaying strong growth in particular in the East Asian community and among practitioners working on multi-criteria decision making, earth sciences, and sensor fusion. Belief functions are a natural tool to cope with heavy uncertainty, lack of evidence and missing data, and extremely rare events.\n\nAn early debate on the rationale of belief functions gave a strong contribution to the growth and success of the UAI community and series of conference in the Eighties and Nineties, thanks to the contribution of scientists of the caliber of Glenn Shafer, Judea Pearl, Philippe Smets and Prakash Shenoy, among others. Ever since the UAI and BELIEF community have somewhat diverged, and the proposers’ effort has been recently directed towards going back to a closer relationships and exchange of ideas between the two communities. This was one of the aims of the recent BELIEF 2014 International Conference of which the proposers were General Chair and member of the Steering Committee, respectively. 
A number of books are being published on the subject as we speak, and the impact of the belief function approach to uncertainty is growing.\n\nThe tutorial aims at bridging the gap between researchers in the field and the wider AI and Uncertainty Theory community, with the longer term goal of a more fruitful collaboration and dissemination of ideas.","internal_url":"https://www.academia.edu/16449528/UAI_2015_Tutorial_Belief_Functions_for_the_Working_Scientist","translated_internal_url":"","created_at":"2015-10-04T08:26:51.816-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":366407,"coauthors_can_edit":true,"document_type":"other","co_author_tags":[{"id":6663877,"work_id":16449528,"tagging_user_id":366407,"tagged_user_id":null,"co_author_invite_id":1485232,"email":"t***x@utc.fr","display_order":0,"name":"Thierry Denoeux","title":"UAI 2015 Tutorial - Belief Functions for the Working Scientist"}],"downloadable_attachments":[{"id":38998701,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/38998701/thumbnails/1.jpg","file_name":"UAI2015tutorial-modularv2.pdf","download_url":"https://www.academia.edu/attachments/38998701/download_file","bulk_download_file_name":"UAI_2015_Tutorial_Belief_Functions_for_t.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/38998701/UAI2015tutorial-modularv2-libre.pdf?1443972223=\u0026response-content-disposition=attachment%3B+filename%3DUAI_2015_Tutorial_Belief_Functions_for_t.pdf\u0026Expires=1743344604\u0026Signature=Jwg4uklMupaRzc4yiHjJlrHtodBj8lmYHPQNdUghAl6oFGWAn4v9F1uAi-FFnwYZfxZJ0Q5xvikwSr9mhhDMt4r4-dQXv2CH4-dqF-SzUVu29TRKI0WPDSc1ILoFb3XWKHPV9aKoeLkW4OdwYDS44T7pWecMEiGYKmii3MBI5t-CovOiKDxcD9ANL431c2rcznIrHyU4UyKHd64PCgm~KsrUVM-dxWEWff6Tl3hOqSYZdcJAMidXI5Sg1T6xL-L7UpK84A64tpnhlNV3KGIULNGav5bGvznt2SuI~2I6JvN3khawaW73LChCmhn2zInT6cG-pJTEUQt3MtYQYhakxQ__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"slug":"UAI_2015_Tutorial_Belief_Functions_for_the_Working_Scientist","translated_slug":"","page_count":229,"language":"en","content_type":"Work","summary":"The theory of belief functions, sometimes referred to as evidence theory or Dempster-Shafer theory, was first introduced by Arthur P. Dempster in the context of statistical inference, to be later developed by Glenn Shafer as a general framework for modelling epistemic uncertainty. The methodology is now well established as a general framework for reasoning with uncertainty, with well-understood connections to related frameworks such as probability, possibility, random set and imprecise probability theories. Importantly, in recent years the number of papers published on the theory and application of belief functions has been booming (reaching over 800 in 2014 alone), displaying strong growth in particular in the East Asian community and among practitioners working on multi-criteria decision making, earth sciences, and sensor fusion. Belief functions are a natural tool to cope with heavy uncertainty, lack of evidence and missing data, and extremely rare events.\n\nAn early debate on the rationale of belief functions gave a strong contribution to the growth and success of the UAI community and series of conference in the Eighties and Nineties, thanks to the contribution of scientists of the caliber of Glenn Shafer, Judea Pearl, Philippe Smets and Prakash Shenoy, among others. 
Ever since the UAI and BELIEF community have somewhat diverged, and the proposers’ effort has been recently directed towards going back to a closer relationships and exchange of ideas between the two communities. This was one of the aims of the recent BELIEF 2014 International Conference of which the proposers were General Chair and member of the Steering Committee, respectively. A number of books are being published on the subject as we speak, and the impact of the belief function approach to uncertainty is growing.\n\nThe tutorial aims at bridging the gap between researchers in the field and the wider AI and Uncertainty Theory community, with the longer term goal of a more fruitful collaboration and dissemination of ideas.","owner":{"id":366407,"first_name":"Fabio","middle_initials":null,"last_name":"Cuzzolin","page_name":"FabioCuzzolin","domain_name":"oxfordbrookes","created_at":"2011-03-17T00:43:31.195-07:00","display_name":"Fabio Cuzzolin","url":"https://oxfordbrookes.academia.edu/FabioCuzzolin"},"attachments":[{"id":38998701,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/38998701/thumbnails/1.jpg","file_name":"UAI2015tutorial-modularv2.pdf","download_url":"https://www.academia.edu/attachments/38998701/download_file","bulk_download_file_name":"UAI_2015_Tutorial_Belief_Functions_for_t.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/38998701/UAI2015tutorial-modularv2-libre.pdf?1443972223=\u0026response-content-disposition=attachment%3B+filename%3DUAI_2015_Tutorial_Belief_Functions_for_t.pdf\u0026Expires=1743344604\u0026Signature=Jwg4uklMupaRzc4yiHjJlrHtodBj8lmYHPQNdUghAl6oFGWAn4v9F1uAi-FFnwYZfxZJ0Q5xvikwSr9mhhDMt4r4-dQXv2CH4-dqF-SzUVu29TRKI0WPDSc1ILoFb3XWKHPV9aKoeLkW4OdwYDS44T7pWecMEiGYKmii3MBI5t-CovOiKDxcD9ANL431c2rcznIrHyU4UyKHd64PCgm~KsrUVM-dxWEWff6Tl3hOqSYZdcJAMidXI5Sg1T6xL-L7UpK84A64tpnhlNV3KGIULNGav5bGvznt2SuI~2I6JvN3khawaW73LChCmhn2zInT6cG-pJTEUQt3MtYQYhakxQ__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"research_interests":[{"id":305,"name":"Applied Mathematics","url":"https://www.academia.edu/Documents/in/Applied_Mathematics"},{"id":344,"name":"Probability Theory","url":"https://www.academia.edu/Documents/in/Probability_Theory"},{"id":465,"name":"Artificial Intelligence","url":"https://www.academia.edu/Documents/in/Artificial_Intelligence"},{"id":892,"name":"Statistics","url":"https://www.academia.edu/Documents/in/Statistics"},{"id":4060,"name":"Applied Statistics","url":"https://www.academia.edu/Documents/in/Applied_Statistics"},{"id":5394,"name":"Fuzzy set theory","url":"https://www.academia.edu/Documents/in/Fuzzy_set_theory"},{"id":6404,"name":"Reasoning about Uncertainty","url":"https://www.academia.edu/Documents/in/Reasoning_about_Uncertainty"},{"id":10178,"name":"Rough Sets","url":"https://www.academia.edu/Documents/in/Rough_Sets"},{"id":10182,"name":"Fuzzy Sets","url":"https://www.academia.edu/Documents/in/Fuzzy_Sets"},{"id":16097,"name":"Decision Making Under Uncertainty","url":"https://www.academia.edu/Documents/in/Decision_Making_Under_Uncertainty"},{"id":22613,"name":"Probability and statistics","url":"https://www.academia.edu/Documents/in/Probability_and_statistics"},{"id":31412,"name":"Probability and Mathematical Statistics","url":"https://www.academia.edu/Documents/in/Probability_and_Mathematical_Statistics"},{"id":31477,"name":"Uncertainty 
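A core operation the tutorial's framework provides is Dempster's rule for combining independent bodies of evidence: m12(A) is proportional to the sum of m1(B) m2(C) over all B, C with B ∩ C = A, renormalised by the conflict mass K assigned to the empty set. Below is a minimal sketch, reusing the dictionary-of-frozensets encoding from the earlier snippet (again an illustrative choice, not the tutorial's notation):

```python
def dempster_combine(m1, m2):
    """Dempster's rule: m12(A) = (1 / (1 - K)) * sum_{B ∩ C = A} m1(B) m2(C),
    where K is the total mass of conflicting (empty-intersection) pairs."""
    combined = {}
    conflict = 0.0
    for B, v1 in m1.items():
        for C, v2 in m2.items():
            A = B & C
            if A:
                combined[A] = combined.get(A, 0.0) + v1 * v2
            else:
                conflict += v1 * v2
    if conflict >= 1.0:
        raise ValueError("totally conflicting evidence: rule undefined")
    return {A: v / (1.0 - conflict) for A, v in combined.items()}

# Two independent witnesses on the frame {rain, sun}:
frame = frozenset({"rain", "sun"})
w1 = {frozenset({"rain"}): 0.7, frame: 0.3}  # fairly sure it rained
w2 = {frozenset({"rain"}): 0.5, frame: 0.5}  # weaker evidence
print(dempster_combine(w1, w2))
```

Combining w1 and w2 yields m({rain}) = 0.85 and m({rain, sun}) = 0.15: agreement between independent sources strengthens the shared hypothesis while leaving residual mass on the whole frame.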
Quantification","url":"https://www.academia.edu/Documents/in/Uncertainty_Quantification"},{"id":33069,"name":"Probability","url":"https://www.academia.edu/Documents/in/Probability"},{"id":61603,"name":"Uncertainty","url":"https://www.academia.edu/Documents/in/Uncertainty"},{"id":74262,"name":"Philosophy of Artificial Intelligence","url":"https://www.academia.edu/Documents/in/Philosophy_of_Artificial_Intelligence"},{"id":94223,"name":"Imprecise Probability","url":"https://www.academia.edu/Documents/in/Imprecise_Probability"},{"id":94225,"name":"Belief Functions","url":"https://www.academia.edu/Documents/in/Belief_Functions"},{"id":141502,"name":"Inteligencia artificial","url":"https://www.academia.edu/Documents/in/Inteligencia_artificial-1"},{"id":265402,"name":"Applied Mathematics and Statistics","url":"https://www.academia.edu/Documents/in/Applied_Mathematics_and_Statistics"},{"id":290552,"name":"Uncertainty analysis","url":"https://www.academia.edu/Documents/in/Uncertainty_analysis"},{"id":388873,"name":"Mathematics and Statistics","url":"https://www.academia.edu/Documents/in/Mathematics_and_Statistics"},{"id":474590,"name":"Rough set theory","url":"https://www.academia.edu/Documents/in/Rough_set_theory"},{"id":510403,"name":"Possibility","url":"https://www.academia.edu/Documents/in/Possibility"},{"id":571797,"name":"Introduction to Probability","url":"https://www.academia.edu/Documents/in/Introduction_to_Probability"},{"id":608598,"name":"Uncertainty Modeling","url":"https://www.academia.edu/Documents/in/Uncertainty_Modeling"},{"id":1223686,"name":"Artificial Intelligent and Soft Computing Methodologies","url":"https://www.academia.edu/Documents/in/Artificial_Intelligent_and_Soft_Computing_Methodologies"}],"urls":[{"id":7046268,"url":"https://www.youtube.com/watch?v=nhGzn0R5TgM"}]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") if (false) { Aedu.setUpFigureCarousel('profile-work-16449528-figures'); } }); </script> </div><div class="profile--tab_content_container js-tab-pane tab-pane" data-section-id="3674865" id="booksandmonographs"><div class="js-work-strip profile--work_container" data-work-id="91915982"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/91915982/Continual_Semi_Supervised_Learning_First_International_Workshop_CSSL_2021_Virtual_Event_August_19_20_2021_Revised_Selected_Papers"><img alt="Research paper thumbnail of Continual Semi-Supervised Learning - First International Workshop, CSSL 2021, Virtual Event, August 19–20, 2021, Revised Selected Papers" class="work-thumbnail" src="https://attachments.academia-assets.com/95067953/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/91915982/Continual_Semi_Supervised_Learning_First_International_Workshop_CSSL_2021_Virtual_Event_August_19_20_2021_Revised_Selected_Papers">Continual Semi-Supervised Learning - First International Workshop, CSSL 2021, Virtual Event, August 19–20, 2021, Revised Selected Papers</a></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">Whereas continual learning has recently attracted much attention in the machine learning communit...</span><a 
class="js-work-more-abstract" data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">Whereas continual learning has recently attracted much attention in the machine learning community, the focus has been mainly on preventing the model updated in the light of new data from ‘catastrophically forgetting’ its initial knowledge and abilities. This, however, is in stark contrast with common real-world situations in which an initial model is trained using limited data, only to be later deployed without any additional supervision. In these scenarios the goal is for the model to be incrementally updated using the new (unlabelled) data, in order to adapt to a target domain continually shifting over time. These situations can be modeled by an original continual semi-supervised learning (CSSL) paradigm. There, an initial training batch of data-points annotated with ground truth (class labels for classification problems, or vectors of target values for regression ones) is available and can be used to train an initial model. Then, however, the model is incrementally updated by exploiting the information provided by a stream of unlabelled data points, each of which is generated by a data generating process (modelled, as typically assumed, by a probability distribution) which varies with time. No artificial subdivision into ‘tasks’ is assumed, as the data-generating distribution may arbitrarily vary over time. The aim of the First International Workshop on Continual Semi-Supervised Learning (CSSL @ IJCAI 2021)1 was to formalise this new learning paradigm and to introduce it to the wider machine learning community, in order to mobilise effort in this direction. As part of the workshop we also presented the first two benchmark datasets for this problem, derived from important computer vision scenarios, and proposed the first Continual Semi-Supervised Learning Challenges to the research community. The workshop encouraged the submission of papers on continual learning in its broader sense, covering topics such as: the suitability of existing datasets for continual learning; new benchmark datasets explicitly designed for continual learning; protocols for training and testing in different continual learning settings; metrics for assessing continual learning methods; traditional task-based continual learning; the relation between continual learning and model adaptation; the distinction between the learning of new classes and the learning from new instances; real-world applications of continual learning; catastrophic forgetting and possible mitigation strategies; applications of transfer learning, multi-task and meta-learning to continual learning; continual supervised, semisupervised and unsupervised learning; lifelong learning; few-shot learning; and continual reinforcement and inverse reinforcement learning. The aim was to foster the debate around all aspects of continual learning, especially those which are the subject of ongoing frontier research. As part of the event, we invited both paper track contributions on the above-mentioned topics as well as submissions of entries to two challenges specifically designed to test CSSL approaches. 
To this purpose, two new benchmarks, a Continual Activity Recognition (CAR) dataset2 and a Continual Crowd Counting (CCC) dataset, were specifically designed to assess continual semisupervised learning on two important computer vision tasks: activity recognition and crowd counting. Papers submitted to the workshop were asked to follow the standard IJCAI 2021 template (6 pages plus 1 for the references). Paper submission took place through EasyChair. Authors were allowed to submit a supplementary material document with details on their implementation. However, reviewers were not required to consult this additional material when assessing the submission. A double-blind review process was followed. Authors were asked not include any identifying information (names, affiliations, etc.) or links and self-references that could reveal their identities. Each submission received three reviews from members of the Program Committee, which assessed it based on relevance, novelty and potential for impact. No rebuttal stage was introduced. The authors of the accepted papers were asked to guarantee their presence at the workshop, with at least one author for each accepted paper registering for the conference. The workshop allowed for the presentation during the workshop of results published elsewhere, but these papers were not considered for or included in these published proceedings. The paper submission deadline was initially set to June 15, 2021, but was later extended to July 2, 2021. Authors were notified of the result on July 19, 2021, and asked to submit a camera-ready version of their paper by July 31. A total of 14 papers were submitted, of which one was withdrawn and one rejected, for an acceptance rate of 86% of papers presented at the workshop, while the rate of acceptance for papers intended for the published proceedings is 69%, 9 papers. The 20 members of the Program Committee were assigned on average two papers to review each. The workshop issued a Best Paper Award to the author(s) of the best accepted paper, as judged by the Organising Committee based on the reviews assigned by PC members, as well as a Best Student Paper Award, selected in the same way and a Prize to be awarded to the winners of each of the Challenges. The Best Paper Award was assigned to “SPeCiaL: Self-Supervised Pretraining for Continual Learning”, by Lucas Caccia and Joelle Pineau. 
The Best Student Paper Award was secured by “Hypernetworks for Continual Semi-Supervised Learning”, by Dhanajit Brahma, Vinay Kumar Verma and Piyush Rai.</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="6405b08229f9da63e12591b027a45564" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":95067953,"asset_id":91915982,"asset_type":"Work","button_location":"profile"}" href="https://www.academia.edu/attachments/95067953/download_file?s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="91915982"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="91915982"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 91915982; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=91915982]").text(description); $(".js-view-count[data-work-id=91915982]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 91915982; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='91915982']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-a9bf3a2bc8c89fa2a77156577594264ee8a0f214d74241bc0fcd3f69f8d107ac.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (true){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "6405b08229f9da63e12591b027a45564" } } $('.js-work-strip[data-work-id=91915982]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":91915982,"title":"Continual Semi-Supervised Learning - First International Workshop, CSSL 2021, Virtual Event, August 19–20, 2021, Revised Selected Papers","translated_title":"","metadata":{"doi":"10.1007/978-3-031-17587-9","abstract":"Whereas continual learning has recently attracted much attention in the machine learning community, the focus has been mainly on preventing the model updated in the light of new data from ‘catastrophically forgetting’ its initial knowledge and abilities. 
This, however, is in stark contrast with common real-world situations in which an initial model is trained using limited data, only to be later deployed without any additional supervision. In these scenarios the goal is for the model to be incrementally updated using the new (unlabelled) data, in order to adapt to a target domain continually shifting over time. These situations can be modeled by an original continual semi-supervised learning (CSSL) paradigm. There, an initial training batch of data-points annotated with ground truth (class labels for classification problems, or vectors of target values for regression ones) is available and can be used to train an initial model. Then, however, the model is incrementally updated by exploiting the information provided by a stream of unlabelled data points, each of which is generated by a data generating process (modelled, as typically assumed, by a probability distribution) which varies with time. No artificial subdivision into ‘tasks’ is assumed, as the data-generating distribution may arbitrarily vary over time. The aim of the First International Workshop on Continual Semi-Supervised Learning (CSSL @ IJCAI 2021)1 was to formalise this new learning paradigm and to introduce it to the wider machine learning community, in order to mobilise effort in this direction. As part of the workshop we also presented the first two benchmark datasets for this problem, derived from important computer vision scenarios, and proposed the first Continual Semi-Supervised Learning Challenges to the research community. The workshop encouraged the submission of papers on continual learning in its broader sense, covering topics such as: the suitability of existing datasets for continual learning; new benchmark datasets explicitly designed for continual learning; protocols for training and testing in different continual learning settings; metrics for assessing continual learning methods; traditional task-based continual learning; the relation between continual learning and model adaptation; the distinction between the learning of new classes and the learning from new instances; real-world applications of continual learning; catastrophic forgetting and possible mitigation strategies; applications of transfer learning, multi-task and meta-learning to continual learning; continual supervised, semisupervised and unsupervised learning; lifelong learning; few-shot learning; and continual reinforcement and inverse reinforcement learning. The aim was to foster the debate around all aspects of continual learning, especially those which are the subject of ongoing frontier research. As part of the event, we invited both paper track contributions on the above-mentioned topics as well as submissions of entries to two challenges specifically designed to test CSSL approaches. To this purpose, two new benchmarks, a Continual Activity Recognition (CAR) dataset2 and a Continual Crowd Counting (CCC) dataset, were specifically designed to assess continual semisupervised learning on two important computer vision tasks: activity recognition and crowd counting. Papers submitted to the workshop were asked to follow the standard IJCAI 2021 template (6 pages plus 1 for the references). Paper submission took place through EasyChair. Authors were allowed to submit a supplementary material document with details on their implementation. However, reviewers were not required to consult this additional material when assessing the submission. A double-blind review process was followed. 
Authors were asked not include any identifying information (names, affiliations, etc.) or links and self-references that could reveal their identities. Each submission received three reviews from members of the Program Committee, which assessed it based on relevance, novelty and potential for impact. No rebuttal stage was introduced. The authors of the accepted papers were asked to guarantee their presence at the workshop, with at least one author for each accepted paper registering for the conference. The workshop allowed for the presentation during the workshop of results published elsewhere, but these papers were not considered for or included in these published proceedings. The paper submission deadline was initially set to June 15, 2021, but was later extended to July 2, 2021. Authors were notified of the result on July 19, 2021, and asked to submit a camera-ready version of their paper by July 31. A total of 14 papers were submitted, of which one was withdrawn and one rejected, for an acceptance rate of 86% of papers presented at the workshop, while the rate of acceptance for papers intended for the published proceedings is 69%, 9 papers. The 20 members of the Program Committee were assigned on average two papers to review each. The workshop issued a Best Paper Award to the author(s) of the best accepted paper, as judged by the Organising Committee based on the reviews assigned by PC members, as well as a Best Student Paper Award, selected in the same way and a Prize to be awarded to the winners of each of the Challenges. The Best Paper Award was assigned to “SPeCiaL: Self-Supervised Pretraining for Continual Learning”, by Lucas Caccia and Joelle Pineau. The Best Student Paper Award was secured by “Hypernetworks for Continual Semi-Supervised Learning”, by Dhanajit Brahma, Vinay Kumar Verma and Piyush Rai.","ai_title_tag":"Continual Semi-Supervised Learning Workshop 2021","publication_date":{"day":null,"month":null,"year":2022,"errors":{}}},"translated_abstract":"Whereas continual learning has recently attracted much attention in the machine learning community, the focus has been mainly on preventing the model updated in the light of new data from ‘catastrophically forgetting’ its initial knowledge and abilities. This, however, is in stark contrast with common real-world situations in which an initial model is trained using limited data, only to be later deployed without any additional supervision. In these scenarios the goal is for the model to be incrementally updated using the new (unlabelled) data, in order to adapt to a target domain continually shifting over time. These situations can be modeled by an original continual semi-supervised learning (CSSL) paradigm. There, an initial training batch of data-points annotated with ground truth (class labels for classification problems, or vectors of target values for regression ones) is available and can be used to train an initial model. Then, however, the model is incrementally updated by exploiting the information provided by a stream of unlabelled data points, each of which is generated by a data generating process (modelled, as typically assumed, by a probability distribution) which varies with time. No artificial subdivision into ‘tasks’ is assumed, as the data-generating distribution may arbitrarily vary over time. 
The aim of the First International Workshop on Continual Semi-Supervised Learning (CSSL @ IJCAI 2021)1 was to formalise this new learning paradigm and to introduce it to the wider machine learning community, in order to mobilise effort in this direction. As part of the workshop we also presented the first two benchmark datasets for this problem, derived from important computer vision scenarios, and proposed the first Continual Semi-Supervised Learning Challenges to the research community. The workshop encouraged the submission of papers on continual learning in its broader sense, covering topics such as: the suitability of existing datasets for continual learning; new benchmark datasets explicitly designed for continual learning; protocols for training and testing in different continual learning settings; metrics for assessing continual learning methods; traditional task-based continual learning; the relation between continual learning and model adaptation; the distinction between the learning of new classes and the learning from new instances; real-world applications of continual learning; catastrophic forgetting and possible mitigation strategies; applications of transfer learning, multi-task and meta-learning to continual learning; continual supervised, semisupervised and unsupervised learning; lifelong learning; few-shot learning; and continual reinforcement and inverse reinforcement learning. The aim was to foster the debate around all aspects of continual learning, especially those which are the subject of ongoing frontier research. As part of the event, we invited both paper track contributions on the above-mentioned topics as well as submissions of entries to two challenges specifically designed to test CSSL approaches. To this purpose, two new benchmarks, a Continual Activity Recognition (CAR) dataset2 and a Continual Crowd Counting (CCC) dataset, were specifically designed to assess continual semisupervised learning on two important computer vision tasks: activity recognition and crowd counting. Papers submitted to the workshop were asked to follow the standard IJCAI 2021 template (6 pages plus 1 for the references). Paper submission took place through EasyChair. Authors were allowed to submit a supplementary material document with details on their implementation. However, reviewers were not required to consult this additional material when assessing the submission. A double-blind review process was followed. Authors were asked not include any identifying information (names, affiliations, etc.) or links and self-references that could reveal their identities. Each submission received three reviews from members of the Program Committee, which assessed it based on relevance, novelty and potential for impact. No rebuttal stage was introduced. The authors of the accepted papers were asked to guarantee their presence at the workshop, with at least one author for each accepted paper registering for the conference. The workshop allowed for the presentation during the workshop of results published elsewhere, but these papers were not considered for or included in these published proceedings. The paper submission deadline was initially set to June 15, 2021, but was later extended to July 2, 2021. Authors were notified of the result on July 19, 2021, and asked to submit a camera-ready version of their paper by July 31. 
A total of 14 papers were submitted, of which one was withdrawn and one rejected, for an acceptance rate of 86% of papers presented at the workshop, while the rate of acceptance for papers intended for the published proceedings is 69%, 9 papers. The 20 members of the Program Committee were assigned on average two papers to review each. The workshop issued a Best Paper Award to the author(s) of the best accepted paper, as judged by the Organising Committee based on the reviews assigned by PC members, as well as a Best Student Paper Award, selected in the same way and a Prize to be awarded to the winners of each of the Challenges. The Best Paper Award was assigned to “SPeCiaL: Self-Supervised Pretraining for Continual Learning”, by Lucas Caccia and Joelle Pineau. The Best Student Paper Award was secured by “Hypernetworks for Continual Semi-Supervised Learning”, by Dhanajit Brahma, Vinay Kumar Verma and Piyush Rai.","internal_url":"https://www.academia.edu/91915982/Continual_Semi_Supervised_Learning_First_International_Workshop_CSSL_2021_Virtual_Event_August_19_20_2021_Revised_Selected_Papers","translated_internal_url":"","created_at":"2022-11-30T03:52:41.644-08:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":366407,"coauthors_can_edit":true,"document_type":"paper","co_author_tags":[{"id":39135062,"work_id":91915982,"tagging_user_id":366407,"tagged_user_id":32878615,"co_author_invite_id":null,"email":"v***o@studio.unibo.it","affiliation":"Università di Bologna","display_order":1,"name":"Vincenzo Lomonaco","title":"Continual Semi-Supervised Learning - First International Workshop, CSSL 2021, Virtual Event, August 19–20, 2021, Revised Selected Papers"}],"downloadable_attachments":[{"id":95067953,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/95067953/thumbnails/1.jpg","file_name":"Front_matter.pdf","download_url":"https://www.academia.edu/attachments/95067953/download_file","bulk_download_file_name":"Continual_Semi_Supervised_Learning_First.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/95067953/Front_matter-libre.pdf?1669812831=\u0026response-content-disposition=attachment%3B+filename%3DContinual_Semi_Supervised_Learning_First.pdf\u0026Expires=1743344605\u0026Signature=PBsBJlXFrPSE7xbujJh4D5UeLZIcPo2N4K4tQOW9jb6K3OO9DlVIPDAcSK5S2JbJcXL3-7W2cxwHdzbHNwkvuv~EgW9Jk0FEeCx0Myq8HsVofggPtInh0tvgFzfhVQ2u~IL2RwSygYj64mcmW6GixAiXOn4pua3y9vJeMS~G52tLAtGtWtPYx957DadKapl--UrNfqhbCBDbhSFS0iMM~QrNIXll0KB8B-AduWHO0MulP8PU8j6lC-PA44JhGjOSPxvOldlxJ~eCu4i~ZdCyKTbOvDHQMEugq78Ckv9zQ0J9uVZFLbXaT-RLHvh9JJB5aOYSEDCSM~kHbPiUJpeCfA__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"slug":"Continual_Semi_Supervised_Learning_First_International_Workshop_CSSL_2021_Virtual_Event_August_19_20_2021_Revised_Selected_Papers","translated_slug":"","page_count":13,"language":"en","content_type":"Work","summary":"Whereas continual learning has recently attracted much attention in the machine learning community, the focus has been mainly on preventing the model updated in the light of new data from ‘catastrophically forgetting’ its initial knowledge and abilities. This, however, is in stark contrast with common real-world situations in which an initial model is trained using limited data, only to be later deployed without any additional supervision. In these scenarios the goal is for the model to be incrementally updated using the new (unlabelled) data, in order to adapt to a target domain continually shifting over time. 
These situations can be modeled by an original continual semi-supervised learning (CSSL) paradigm. There, an initial training batch of data-points annotated with ground truth (class labels for classification problems, or vectors of target values for regression ones) is available and can be used to train an initial model. Then, however, the model is incrementally updated by exploiting the information provided by a stream of unlabelled data points, each of which is generated by a data generating process (modelled, as typically assumed, by a probability distribution) which varies with time. No artificial subdivision into ‘tasks’ is assumed, as the data-generating distribution may arbitrarily vary over time. The aim of the First International Workshop on Continual Semi-Supervised Learning (CSSL @ IJCAI 2021)1 was to formalise this new learning paradigm and to introduce it to the wider machine learning community, in order to mobilise effort in this direction. As part of the workshop we also presented the first two benchmark datasets for this problem, derived from important computer vision scenarios, and proposed the first Continual Semi-Supervised Learning Challenges to the research community. The workshop encouraged the submission of papers on continual learning in its broader sense, covering topics such as: the suitability of existing datasets for continual learning; new benchmark datasets explicitly designed for continual learning; protocols for training and testing in different continual learning settings; metrics for assessing continual learning methods; traditional task-based continual learning; the relation between continual learning and model adaptation; the distinction between the learning of new classes and the learning from new instances; real-world applications of continual learning; catastrophic forgetting and possible mitigation strategies; applications of transfer learning, multi-task and meta-learning to continual learning; continual supervised, semisupervised and unsupervised learning; lifelong learning; few-shot learning; and continual reinforcement and inverse reinforcement learning. The aim was to foster the debate around all aspects of continual learning, especially those which are the subject of ongoing frontier research. As part of the event, we invited both paper track contributions on the above-mentioned topics as well as submissions of entries to two challenges specifically designed to test CSSL approaches. To this purpose, two new benchmarks, a Continual Activity Recognition (CAR) dataset2 and a Continual Crowd Counting (CCC) dataset, were specifically designed to assess continual semisupervised learning on two important computer vision tasks: activity recognition and crowd counting. Papers submitted to the workshop were asked to follow the standard IJCAI 2021 template (6 pages plus 1 for the references). Paper submission took place through EasyChair. Authors were allowed to submit a supplementary material document with details on their implementation. However, reviewers were not required to consult this additional material when assessing the submission. A double-blind review process was followed. Authors were asked not include any identifying information (names, affiliations, etc.) or links and self-references that could reveal their identities. Each submission received three reviews from members of the Program Committee, which assessed it based on relevance, novelty and potential for impact. No rebuttal stage was introduced. 
The authors of the accepted papers were asked to guarantee their presence at the workshop, with at least one author for each accepted paper registering for the conference. The workshop allowed for the presentation during the workshop of results published elsewhere, but these papers were not considered for or included in these published proceedings. The paper submission deadline was initially set to June 15, 2021, but was later extended to July 2, 2021. Authors were notified of the result on July 19, 2021, and asked to submit a camera-ready version of their paper by July 31. A total of 14 papers were submitted, of which one was withdrawn and one rejected, for an acceptance rate of 86% of papers presented at the workshop, while the rate of acceptance for papers intended for the published proceedings is 69%, 9 papers. The 20 members of the Program Committee were assigned on average two papers to review each. The workshop issued a Best Paper Award to the author(s) of the best accepted paper, as judged by the Organising Committee based on the reviews assigned by PC members, as well as a Best Student Paper Award, selected in the same way and a Prize to be awarded to the winners of each of the Challenges. The Best Paper Award was assigned to “SPeCiaL: Self-Supervised Pretraining for Continual Learning”, by Lucas Caccia and Joelle Pineau. The Best Student Paper Award was secured by “Hypernetworks for Continual Semi-Supervised Learning”, by Dhanajit Brahma, Vinay Kumar Verma and Piyush Rai.","owner":{"id":366407,"first_name":"Fabio","middle_initials":null,"last_name":"Cuzzolin","page_name":"FabioCuzzolin","domain_name":"oxfordbrookes","created_at":"2011-03-17T00:43:31.195-07:00","display_name":"Fabio Cuzzolin","url":"https://oxfordbrookes.academia.edu/FabioCuzzolin"},"attachments":[{"id":95067953,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/95067953/thumbnails/1.jpg","file_name":"Front_matter.pdf","download_url":"https://www.academia.edu/attachments/95067953/download_file","bulk_download_file_name":"Continual_Semi_Supervised_Learning_First.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/95067953/Front_matter-libre.pdf?1669812831=\u0026response-content-disposition=attachment%3B+filename%3DContinual_Semi_Supervised_Learning_First.pdf\u0026Expires=1743344605\u0026Signature=PBsBJlXFrPSE7xbujJh4D5UeLZIcPo2N4K4tQOW9jb6K3OO9DlVIPDAcSK5S2JbJcXL3-7W2cxwHdzbHNwkvuv~EgW9Jk0FEeCx0Myq8HsVofggPtInh0tvgFzfhVQ2u~IL2RwSygYj64mcmW6GixAiXOn4pua3y9vJeMS~G52tLAtGtWtPYx957DadKapl--UrNfqhbCBDbhSFS0iMM~QrNIXll0KB8B-AduWHO0MulP8PU8j6lC-PA44JhGjOSPxvOldlxJ~eCu4i~ZdCyKTbOvDHQMEugq78Ckv9zQ0J9uVZFLbXaT-RLHvh9JJB5aOYSEDCSM~kHbPiUJpeCfA__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"research_interests":[{"id":465,"name":"Artificial Intelligence","url":"https://www.academia.edu/Documents/in/Artificial_Intelligence"},{"id":2008,"name":"Machine Learning","url":"https://www.academia.edu/Documents/in/Machine_Learning"},{"id":4095,"name":"Classification (Machine Learning)","url":"https://www.academia.edu/Documents/in/Classification_Machine_Learning_"},{"id":15084,"name":"Statistical machine learning","url":"https://www.academia.edu/Documents/in/Statistical_machine_learning"},{"id":54123,"name":"Artificial Neural Networks","url":"https://www.academia.edu/Documents/in/Artificial_Neural_Networks"},{"id":141502,"name":"Inteligencia artificial","url":"https://www.academia.edu/Documents/in/Inteligencia_artificial-1"},{"id":143038,"name":"Machine Learning and Pattern 
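The CSSL protocol described above (a supervised initial batch followed by a time-varying unlabelled stream) can be sketched as a pseudo-labelling loop. The workshop formalised the setting rather than any particular method, so everything below (the SGD classifier, the confidence threshold, the function name) is an illustrative assumption, not the benchmark baseline:

```python
import numpy as np
from sklearn.linear_model import SGDClassifier

def cssl_self_training(X_init, y_init, unlabelled_stream, confidence=0.9):
    """One possible CSSL instantiation via pseudo-labelling: train on the
    initial labelled batch, then incrementally update on stream points
    that the current model labels with high confidence."""
    model = SGDClassifier(loss="log_loss", random_state=0)
    classes = np.unique(y_init)
    model.partial_fit(X_init, y_init, classes=classes)  # initial supervised step
    for X_batch in unlabelled_stream:  # time-ordered ndarray batches, no labels
        proba = model.predict_proba(X_batch)
        keep = proba.max(axis=1) >= confidence  # trust only confident predictions
        if keep.any():
            pseudo = classes[proba[keep].argmax(axis=1)]  # pseudo-labels
            model.partial_fit(X_batch[keep], pseudo)      # continual update
    return model
```

Because the data-generating distribution drifts, the confidence threshold trades adaptation speed against the accumulation of wrong pseudo-labels: a threshold of 1.0 effectively freezes the model, while a low threshold risks reinforcing its own errors.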
Recognition","url":"https://www.academia.edu/Documents/in/Machine_Learning_and_Pattern_Recognition"},{"id":332460,"name":"Continual Learning","url":"https://www.academia.edu/Documents/in/Continual_Learning"},{"id":559503,"name":"Machine Learning Big Data","url":"https://www.academia.edu/Documents/in/Machine_Learning_Big_Data"},{"id":1223686,"name":"Artificial Intelligent and Soft Computing Methodologies","url":"https://www.academia.edu/Documents/in/Artificial_Intelligent_and_Soft_Computing_Methodologies"}],"urls":[]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") if (false) { Aedu.setUpFigureCarousel('profile-work-91915982-figures'); } }); </script> <div class="js-work-strip profile--work_container" data-work-id="37605456"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" rel="nofollow" href="https://www.academia.edu/37605456/Belief_Functions_Theory_and_Applications_5th_International_Conference_BELIEF_2018_Proceedings"><img alt="Research paper thumbnail of Belief Functions: Theory and Applications - 5th International Conference, BELIEF 2018, Proceedings" class="work-thumbnail" src="https://a.academia-assets.com/images/blank-paper.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title">Belief Functions: Theory and Applications - 5th International Conference, BELIEF 2018, Proceedings</div><div class="wp-workCard_item"><span>Lecture Notes in Computer Science</span><span>, 2018</span></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">This book constitutes the refereed proceedings of the 5th International Conference on Belief Func...</span><a class="js-work-more-abstract" data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">This book constitutes the refereed proceedings of the 5th International Conference on Belief Functions, BELIEF 2018, held in Compiègne, France, in September 2018.The 33 revised regular papers presented in this book were carefully selected and reviewed from 73 submissions. 
The papers were solicited on theoretical aspects (including for example statistical inference, mathematical foundations, continuous belief functions) as well as on applications in various areas including classification, statistics, data fusion, network analysis and intelligent vehicles.</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="37605456"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="37605456"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 37605456; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=37605456]").text(description); $(".js-view-count[data-work-id=37605456]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 37605456; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='37605456']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-a9bf3a2bc8c89fa2a77156577594264ee8a0f214d74241bc0fcd3f69f8d107ac.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (false){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "-1" } } $('.js-work-strip[data-work-id=37605456]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":37605456,"title":"Belief Functions: Theory and Applications - 5th International Conference, BELIEF 2018, Proceedings","translated_title":"","metadata":{"doi":"10.1007/978-3-319-99383-6","volume":"11069","abstract":"This book constitutes the refereed proceedings of the 5th International Conference on Belief Functions, BELIEF 2018, held in Compiègne, France, in September 2018.The 33 revised regular papers presented in this book were carefully selected and reviewed from 73 submissions. 
The papers were solicited on theoretical aspects (including for example statistical inference, mathematical foundations, continuous belief functions) as well as on applications in various areas including classification, statistics, data fusion, network analysis and intelligent vehicles.","page_numbers":"280","publication_date":{"day":null,"month":null,"year":2018,"errors":{}},"publication_name":"Lecture Notes in Computer Science"},"translated_abstract":"This book constitutes the refereed proceedings of the 5th International Conference on Belief Functions, BELIEF 2018, held in Compiègne, France, in September 2018.The 33 revised regular papers presented in this book were carefully selected and reviewed from 73 submissions. The papers were solicited on theoretical aspects (including for example statistical inference, mathematical foundations, continuous belief functions) as well as on applications in various areas including classification, statistics, data fusion, network analysis and intelligent vehicles.","internal_url":"https://www.academia.edu/37605456/Belief_Functions_Theory_and_Applications_5th_International_Conference_BELIEF_2018_Proceedings","translated_internal_url":"","created_at":"2018-10-18T04:20:07.152-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":366407,"coauthors_can_edit":true,"document_type":"other","co_author_tags":[{"id":31966104,"work_id":37605456,"tagging_user_id":366407,"tagged_user_id":null,"co_author_invite_id":913728,"email":"t***x@utc.fr","display_order":0,"name":"Thierry Denoeux","title":"Belief Functions: Theory and Applications - 5th International Conference, BELIEF 2018, Proceedings"},{"id":31966105,"work_id":37605456,"tagging_user_id":366407,"tagged_user_id":2032150,"co_author_invite_id":null,"email":"s***e@gmail.com","affiliation":"Compiegne Technology University","display_order":4194304,"name":"Sebastien Destercke","title":"Belief Functions: Theory and Applications - 5th International Conference, BELIEF 2018, Proceedings"}],"downloadable_attachments":[],"slug":"Belief_Functions_Theory_and_Applications_5th_International_Conference_BELIEF_2018_Proceedings","translated_slug":"","page_count":null,"language":"en","content_type":"Work","summary":"This book constitutes the refereed proceedings of the 5th International Conference on Belief Functions, BELIEF 2018, held in Compiègne, France, in September 2018.The 33 revised regular papers presented in this book were carefully selected and reviewed from 73 submissions. 
Belief Functions: Theory and Applications
Proceedings of BELIEF 2014, Springer. http://www.springer.com/gb/book/9783319111902

This book constitutes the thoroughly refereed proceedings of the Third International Conference on Belief Functions, BELIEF 2014, held in Oxford, UK, in September 2014. The 47 revised full papers presented in this book were carefully selected and reviewed from 56 submissions. The papers are organized in topical sections on belief combination; machine learning; applications; theory; networks; information fusion; data association; and geometry.
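As background for the topical section on belief combination (this is the textbook form of Dempster's rule of combination, not a result from any specific paper in the volume), two mass functions m_1 and m_2 defined over the same frame of discernment combine as

\[
(m_1 \oplus m_2)(A) \;=\; \frac{1}{1-K} \sum_{B \cap C = A} m_1(B)\, m_2(C),
\qquad
K \;=\; \sum_{B \cap C = \emptyset} m_1(B)\, m_2(C),
\]

for every non-empty subset A of the frame, where K measures the conflict between the two pieces of evidence (the rule is undefined when K = 1).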
Probabilities","url":"https://www.academia.edu/Documents/in/Non-probabilistic_Modeling_and_Imprecise_Probabilities"},{"id":141502,"name":"Inteligencia artificial","url":"https://www.academia.edu/Documents/in/Inteligencia_artificial-1"},{"id":224842,"name":"Risk and Uncertainty in decision-making","url":"https://www.academia.edu/Documents/in/Risk_and_Uncertainty_in_decision-making"},{"id":290552,"name":"Uncertainty analysis","url":"https://www.academia.edu/Documents/in/Uncertainty_analysis"},{"id":304534,"name":"Dempster-Shafer Theory of Evidence","url":"https://www.academia.edu/Documents/in/Dempster-Shafer_Theory_of_Evidence"},{"id":571797,"name":"Introduction to Probability","url":"https://www.academia.edu/Documents/in/Introduction_to_Probability"},{"id":608598,"name":"Uncertainty Modeling","url":"https://www.academia.edu/Documents/in/Uncertainty_Modeling"},{"id":655694,"name":"Subjective Probability - Fuzzy Theory and Belief Functions","url":"https://www.academia.edu/Documents/in/Subjective_Probability_-_Fuzzy_Theory_and_Belief_Functions"},{"id":1005286,"name":"Dempster Shafer Theory","url":"https://www.academia.edu/Documents/in/Dempster_Shafer_Theory"},{"id":1745278,"name":"Decision Making Using Dempster-Shafer Theory of Evidence","url":"https://www.academia.edu/Documents/in/Decision_Making_Using_Dempster-Shafer_Theory_of_Evidence"}],"urls":[{"id":5921137,"url":"http://www.springer.com/gb/book/9783319111902"}]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") if (false) { Aedu.setUpFigureCarousel('profile-work-16451041-figures'); } }); </script> <div class="js-work-strip profile--work_container" data-work-id="16449653"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/16449653/Visions_of_a_Generalised_Probability_Theory"><img alt="Research paper thumbnail of Visions of a Generalised Probability Theory" class="work-thumbnail" src="https://attachments.academia-assets.com/57587353/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/16449653/Visions_of_a_Generalised_Probability_Theory">Visions of a Generalised Probability Theory</a></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">Computer vision is an ever growing discipline whose ambitious goal is to enable machines with the...</span><a class="js-work-more-abstract" data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">Computer vision is an ever growing discipline whose ambitious goal is to enable machines with the intelligent<br />visual skills humans and animals are provided by Nature, allowing them to interact effortlessly<br />with complex, dynamic environments. Designing automated visual recognition and sensing systems<br />typically involves tackling a number of challenging tasks, and requires an impressive variety of sophisticated<br />mathematical tools. 
In most cases, the knowledge a machine has of its surroundings is at best<br />incomplete – missing data is a common problem, and visual cues are affected by imprecision. The need<br />for a coherent mathematical ‘language’ for the description of uncertain models and measurements then<br />naturally arises from the solution of computer vision problems.<br /><br />The theory of evidence (sometimes referred to as ‘evidential reasoning’, ‘belief theory’ or ‘Dempster-<br />Shafer theory’) is, perhaps, one of the most successful approaches to uncertainty modelling, as arguably<br />the most straightforward and intuitive approaches to a generalized probability theory. Emerging in the<br />last Sixties from a profound criticism of the more classical Bayesian theory of inference and modelling<br />of uncertainty, it stimulated in the last decades an extensive discussion of the epistemic nature of both<br />subjective ‘degrees of beliefs’ and frequentist ‘chances’ or relative frequencies. More recently, a renewed<br />interest in belief functions, the mathematical generalization of probabilities which are the object of study<br />of the theory of evidence, has seen a blossoming of applications to a variety of fields of applied science.<br /><br />In this Book we are going to show how, indeed, the fruitful interaction of computer vision and evidential<br />reasoning is able stimulate a number of advances in both fields. From a methodological point of<br />view, novel theoretical advances concerning the geometric and algebraic properties of belief functions as<br />mathematical objects will be illustrated in some detail in Part II, with a focus on a perspective ‘geometric<br />approach’ to uncertainty and an algebraic solution of the issue of conflicting evidence. In Part III we will<br />illustrate how these new perspectives on the theory of belief functions arise from important computer vision<br />problems, such as articulated object tracking, data association and object pose estimation, to which<br />in turn the evidential formalism can give interesting new solutions. 
Finally, some initial steps towards<br />a generalization of the notion of total probability to belief functions will be taken, in the perspective of<br />endowing the theory of evidence with a complete battery of estimation and inference tools to the benefit<br />of scientists and practitioners.</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="24dc367eb287b9ead9dd9a414861a18e" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":57587353,"asset_id":16449653,"asset_type":"Work","button_location":"profile"}" href="https://www.academia.edu/attachments/57587353/download_file?s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="16449653"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="16449653"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 16449653; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=16449653]").text(description); $(".js-view-count[data-work-id=16449653]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 16449653; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='16449653']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-a9bf3a2bc8c89fa2a77156577594264ee8a0f214d74241bc0fcd3f69f8d107ac.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (true){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "24dc367eb287b9ead9dd9a414861a18e" } } $('.js-work-strip[data-work-id=16449653]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":16449653,"title":"Visions of a Generalised Probability Theory","translated_title":"","metadata":{"abstract":"Computer vision is an ever growing discipline whose ambitious goal is to enable machines with the intelligent\nvisual skills humans and animals are provided by Nature, allowing them to interact effortlessly\nwith complex, dynamic environments. 
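To make the book's central object concrete: a belief function on a finite frame is induced by a mass assignment m over subsets (the ‘focal sets’), with bel(A) summing the mass of all focal sets contained in A, and pl(A) the mass of all focal sets consistent with A. The sketch below illustrates these standard Dempster-Shafer definitions; it is generic background rather than code from the book, and the frame and mass values are invented for the example.

    # Minimal illustration of belief and plausibility induced by a mass
    # assignment (standard Dempster-Shafer definitions; example values invented).

    def belief(mass, event):
        """Total mass committed to focal sets contained in `event`."""
        return sum(m for focal, m in mass.items() if focal <= frozenset(event))

    def plausibility(mass, event):
        """Total mass on focal sets that intersect (are consistent with) `event`."""
        return sum(m for focal, m in mass.items() if focal & frozenset(event))

    # Mass assignment on the frame {a, b, c}; the masses sum to 1.
    mass = {
        frozenset("a"): 0.5,     # evidence pointing exactly at a
        frozenset("ab"): 0.25,   # evidence unable to tell a from b
        frozenset("abc"): 0.25,  # ignorance: mass on the whole frame
    }

    print(belief(mass, "ab"))       # 0.75 (= 0.5 + 0.25)
    print(plausibility(mass, "c"))  # 0.25 (only the ignorance mass is consistent with c)
    print(belief(mass, "c"))        # 0    (no focal set is contained in {c})

Note how bel({c}) = 0 while pl({c}) = 0.25: the gap between belief and plausibility is precisely the imprecision that a single probability measure cannot express.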
Uncertainty","url":"https://www.academia.edu/Documents/in/Decision_Making_Under_Uncertainty"},{"id":16103,"name":"Fuzzy Systems","url":"https://www.academia.edu/Documents/in/Fuzzy_Systems"},{"id":21593,"name":"Artificial Inteligence","url":"https://www.academia.edu/Documents/in/Artificial_Inteligence"},{"id":22613,"name":"Probability and statistics","url":"https://www.academia.edu/Documents/in/Probability_and_statistics"},{"id":28512,"name":"Bayesian Networks","url":"https://www.academia.edu/Documents/in/Bayesian_Networks"},{"id":31412,"name":"Probability and Mathematical Statistics","url":"https://www.academia.edu/Documents/in/Probability_and_Mathematical_Statistics"},{"id":31477,"name":"Uncertainty Quantification","url":"https://www.academia.edu/Documents/in/Uncertainty_Quantification"},{"id":31900,"name":"Fuzzy","url":"https://www.academia.edu/Documents/in/Fuzzy"},{"id":33069,"name":"Probability","url":"https://www.academia.edu/Documents/in/Probability"},{"id":41239,"name":"Bayesian statistics \u0026 modelling","url":"https://www.academia.edu/Documents/in/Bayesian_statistics_and_modelling"},{"id":51529,"name":"Bayesian Inference","url":"https://www.academia.edu/Documents/in/Bayesian_Inference"},{"id":61603,"name":"Uncertainty","url":"https://www.academia.edu/Documents/in/Uncertainty"},{"id":74262,"name":"Philosophy of Artificial Intelligence","url":"https://www.academia.edu/Documents/in/Philosophy_of_Artificial_Intelligence"},{"id":94223,"name":"Imprecise Probability","url":"https://www.academia.edu/Documents/in/Imprecise_Probability"},{"id":94225,"name":"Belief Functions","url":"https://www.academia.edu/Documents/in/Belief_Functions"},{"id":141502,"name":"Inteligencia artificial","url":"https://www.academia.edu/Documents/in/Inteligencia_artificial-1"},{"id":224842,"name":"Risk and Uncertainty in decision-making","url":"https://www.academia.edu/Documents/in/Risk_and_Uncertainty_in_decision-making"},{"id":290552,"name":"Uncertainty analysis","url":"https://www.academia.edu/Documents/in/Uncertainty_analysis"},{"id":304534,"name":"Dempster-Shafer Theory of Evidence","url":"https://www.academia.edu/Documents/in/Dempster-Shafer_Theory_of_Evidence"},{"id":510403,"name":"Possibility","url":"https://www.academia.edu/Documents/in/Possibility"},{"id":550849,"name":"POSSIBILITY THEORY","url":"https://www.academia.edu/Documents/in/POSSIBILITY_THEORY"},{"id":571797,"name":"Introduction to Probability","url":"https://www.academia.edu/Documents/in/Introduction_to_Probability"},{"id":608598,"name":"Uncertainty Modeling","url":"https://www.academia.edu/Documents/in/Uncertainty_Modeling"},{"id":1005286,"name":"Dempster Shafer Theory","url":"https://www.academia.edu/Documents/in/Dempster_Shafer_Theory"},{"id":1007285,"name":"Probability Theory and Statistics","url":"https://www.academia.edu/Documents/in/Probability_Theory_and_Statistics"},{"id":1223686,"name":"Artificial Intelligent and Soft Computing Methodologies","url":"https://www.academia.edu/Documents/in/Artificial_Intelligent_and_Soft_Computing_Methodologies"}],"urls":[{"id":6096396,"url":"https://www.lap-publishing.com/catalog/details//store/gb/book/978-3-659-13175-2/visions-of-a-generalized-probability-theory"}]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") if (false) { Aedu.setUpFigureCarousel('profile-work-16449653-figures'); } }); </script> </div><div class="profile--tab_content_container js-tab-pane tab-pane" data-section-id="5531966" 
id="talks"><div class="js-work-strip profile--work_container" data-work-id="37182727"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/37182727/Machine_Learning_in_Surgery"><img alt="Research paper thumbnail of Machine Learning in Surgery" class="work-thumbnail" src="https://attachments.academia-assets.com/57134116/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/37182727/Machine_Learning_in_Surgery">Machine Learning in Surgery</a></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">This invited talk at COSUR 2018 describes a number of aspects of the application of machine learn...</span><a class="js-work-more-abstract" data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">This invited talk at COSUR 2018 describes a number of aspects of the application of machine learning to surgical robotics, ranging from perception to cognition (the recognition of surgeon actions, anomalous events, and the prediction of future developments).</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="67ccbb61de9c9cbbefc855878efcb2f1" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":57134116,"asset_id":37182727,"asset_type":"Work","button_location":"profile"}" href="https://www.academia.edu/attachments/57134116/download_file?s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="37182727"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="37182727"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 37182727; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=37182727]").text(description); $(".js-view-count[data-work-id=37182727]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 37182727; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='37182727']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 
})(["https://a.academia-assets.com/assets/wow_profile-a9bf3a2bc8c89fa2a77156577594264ee8a0f214d74241bc0fcd3f69f8d107ac.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (true){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "67ccbb61de9c9cbbefc855878efcb2f1" } } $('.js-work-strip[data-work-id=37182727]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":37182727,"title":"Machine Learning in Surgery","translated_title":"","metadata":{"abstract":"This invited talk at COSUR 2018 describes a number of aspects of the application of machine learning to surgical robotics, ranging from perception to cognition (the recognition of surgeon actions, anomalous events, and the prediction of future developments)."},"translated_abstract":"This invited talk at COSUR 2018 describes a number of aspects of the application of machine learning to surgical robotics, ranging from perception to cognition (the recognition of surgeon actions, anomalous events, and the prediction of future developments).","internal_url":"https://www.academia.edu/37182727/Machine_Learning_in_Surgery","translated_internal_url":"","created_at":"2018-08-05T05:06:58.751-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":366407,"coauthors_can_edit":true,"document_type":"talk","co_author_tags":[],"downloadable_attachments":[{"id":57134116,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/57134116/thumbnails/1.jpg","file_name":"COSUR_2018.pdf","download_url":"https://www.academia.edu/attachments/57134116/download_file","bulk_download_file_name":"Machine_Learning_in_Surgery.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/57134116/COSUR_2018-libre.pdf?1533472941=\u0026response-content-disposition=attachment%3B+filename%3DMachine_Learning_in_Surgery.pdf\u0026Expires=1743078337\u0026Signature=La9siBnRZlJ2TQHPas0KxBmmU8SLjpRFdf0YZhECar3LKG~TrPBzppbxSPN8WV2Qhf66Q9Rgf9aOoyryVHNZK6XY3ukluhMdv56VtHYEfvF0LRqDVEL5SZBZK14PG1T1krwRLKlHhiiutMqz-3rqvNOHMn~JQFLvJAjIBkjgDin6Ws79Lw5buK6OKC3Onl78J8aVA31PI0DeHVS7EI9McXz~fZY-4wylt~7~RfX-aVuAbyqpvWAEVDl5D7fjrWNNPNiZ~wN0J-yOSoM5KPj91pHxOie95Z~ws9jtHk7HJbCSZ-S4YFUdyUowTxj3gZR60LqBAydCvzMjl4O-BSUmWQ__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"slug":"Machine_Learning_in_Surgery","translated_slug":"","page_count":72,"language":"en","content_type":"Work","summary":"This invited talk at COSUR 2018 describes a number of aspects of the application of machine learning to surgical robotics, ranging from perception to cognition (the recognition of surgeon actions, anomalous events, and the prediction of future developments).","owner":{"id":366407,"first_name":"Fabio","middle_initials":null,"last_name":"Cuzzolin","page_name":"FabioCuzzolin","domain_name":"oxfordbrookes","created_at":"2011-03-17T00:43:31.195-07:00","display_name":"Fabio 
Cuzzolin","url":"https://oxfordbrookes.academia.edu/FabioCuzzolin"},"attachments":[{"id":57134116,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/57134116/thumbnails/1.jpg","file_name":"COSUR_2018.pdf","download_url":"https://www.academia.edu/attachments/57134116/download_file","bulk_download_file_name":"Machine_Learning_in_Surgery.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/57134116/COSUR_2018-libre.pdf?1533472941=\u0026response-content-disposition=attachment%3B+filename%3DMachine_Learning_in_Surgery.pdf\u0026Expires=1743078337\u0026Signature=La9siBnRZlJ2TQHPas0KxBmmU8SLjpRFdf0YZhECar3LKG~TrPBzppbxSPN8WV2Qhf66Q9Rgf9aOoyryVHNZK6XY3ukluhMdv56VtHYEfvF0LRqDVEL5SZBZK14PG1T1krwRLKlHhiiutMqz-3rqvNOHMn~JQFLvJAjIBkjgDin6Ws79Lw5buK6OKC3Onl78J8aVA31PI0DeHVS7EI9McXz~fZY-4wylt~7~RfX-aVuAbyqpvWAEVDl5D7fjrWNNPNiZ~wN0J-yOSoM5KPj91pHxOie95Z~ws9jtHk7HJbCSZ-S4YFUdyUowTxj3gZR60LqBAydCvzMjl4O-BSUmWQ__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"research_interests":[{"id":77,"name":"Robotics","url":"https://www.academia.edu/Documents/in/Robotics"},{"id":465,"name":"Artificial Intelligence","url":"https://www.academia.edu/Documents/in/Artificial_Intelligence"},{"id":471,"name":"Robotics (Computer Science)","url":"https://www.academia.edu/Documents/in/Robotics_Computer_Science_"},{"id":647,"name":"Surgery","url":"https://www.academia.edu/Documents/in/Surgery"},{"id":854,"name":"Computer Vision","url":"https://www.academia.edu/Documents/in/Computer_Vision"},{"id":2008,"name":"Machine Learning","url":"https://www.academia.edu/Documents/in/Machine_Learning"},{"id":3413,"name":"Robot Vision","url":"https://www.academia.edu/Documents/in/Robot_Vision"},{"id":4892,"name":"Autonomous Robotics","url":"https://www.academia.edu/Documents/in/Autonomous_Robotics"},{"id":7728,"name":"Object Recognition (Computer Vision)","url":"https://www.academia.edu/Documents/in/Object_Recognition_Computer_Vision_"},{"id":14417,"name":"Machine Vision","url":"https://www.academia.edu/Documents/in/Machine_Vision"},{"id":37861,"name":"Laparoscopic Surgery","url":"https://www.academia.edu/Documents/in/Laparoscopic_Surgery"},{"id":42630,"name":"Surgical Robotics","url":"https://www.academia.edu/Documents/in/Surgical_Robotics"},{"id":54123,"name":"Artificial Neural Networks","url":"https://www.academia.edu/Documents/in/Artificial_Neural_Networks"},{"id":68477,"name":"Robotics, Computer Vision, Artificial Intelligence","url":"https://www.academia.edu/Documents/in/Robotics_Computer_Vision_Artificial_Intelligence"},{"id":81182,"name":"Deep Learning","url":"https://www.academia.edu/Documents/in/Deep_Learning"},{"id":90270,"name":"Action Recognition","url":"https://www.academia.edu/Documents/in/Action_Recognition"},{"id":111217,"name":"Scene Understanding","url":"https://www.academia.edu/Documents/in/Scene_Understanding"},{"id":141502,"name":"Inteligencia artificial","url":"https://www.academia.edu/Documents/in/Inteligencia_artificial-1"},{"id":315678,"name":"Human Action Recognition","url":"https://www.academia.edu/Documents/in/Human_Action_Recognition"},{"id":1211304,"name":"Artificial Neural Network","url":"https://www.academia.edu/Documents/in/Artificial_Neural_Network"}],"urls":[]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") }); </script> <div class="js-work-strip profile--work_container" data-work-id="37181245"><div class="profile--work_thumbnail hidden-xs"><a 
class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/37181245/Random_sets_at_the_interface_of_statistics_and_AI_Fifth_Bayesian_Fiducial_and_Frequentist_BFF5_Conference"><img alt="Research paper thumbnail of Random sets at the interface of statistics and AI Fifth Bayesian, Fiducial, and Frequentist (BFF5) Conference" class="work-thumbnail" src="https://attachments.academia-assets.com/57132604/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/37181245/Random_sets_at_the_interface_of_statistics_and_AI_Fifth_Bayesian_Fiducial_and_Frequentist_BFF5_Conference">Random sets at the interface of statistics and AI Fifth Bayesian, Fiducial, and Frequentist (BFF5) Conference</a></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">Random set theory, originally born within the remit of mathematical statistics, lies nowadays at ...</span><a class="js-work-more-abstract" data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">Random set theory, originally born within the remit of mathematical statistics, lies nowadays at the interface of statistics and AI. Arguably more mathematically complex than standard probability, the field is now facing open issues such as the formulation of generalised laws of probability, the generalisation of the notion of random variable to random set spaces, the extension of the notion of random process, and so on. Frequentist inference with random sets can be envisaged to better describe common situations such as lack of data and set-valued observations. To this aim, parameterised families of random sets (and Gaussian random sets in particular) are a crucial area of investigation. In particular, we will present some recent work on the generalisation of the notion of likelihood, as the basis for a generalised logistic regression framework capable to better estimate rare events; a random set-version of maximum-entropy classifiers; and a recent generalisation of the law of total probability to belief functions. 
In a longer-term perspective, random set theory can be instrumental to new robust foundations for statistical machine learning allowing the formulation of models and algorithms able to deal with mission-critical applications ‘in the wild’, in a mutual beneficial exchange between statistics and artificial intelligence.</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="4e9893d3bce12f862f1eaede18b86484" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":57132604,"asset_id":37181245,"asset_type":"Work","button_location":"profile"}" href="https://www.academia.edu/attachments/57132604/download_file?s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="37181245"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="37181245"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 37181245; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=37181245]").text(description); $(".js-view-count[data-work-id=37181245]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 37181245; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='37181245']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-a9bf3a2bc8c89fa2a77156577594264ee8a0f214d74241bc0fcd3f69f8d107ac.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (true){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "4e9893d3bce12f862f1eaede18b86484" } } $('.js-work-strip[data-work-id=37181245]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":37181245,"title":"Random sets at the interface of statistics and AI Fifth Bayesian, Fiducial, and Frequentist (BFF5) Conference","translated_title":"","metadata":{"abstract":"Random set theory, originally born within the remit of mathematical statistics, lies nowadays at the interface of statistics and AI. 
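For reference, the classical law of total probability that the last result generalises states that, for a partition {B_1, ..., B_n} of the sample space,

\[
P(A) \;=\; \sum_{i=1}^{n} P(A \mid B_i)\, P(B_i);
\]

the generalisation mentioned in the talk replaces the probability measures involved with belief functions.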
Arguably more mathematically complex than standard probability, the field is now facing open issues such as the formulation of generalised laws of probability, the generalisation of the notion of random variable to random set spaces, the extension of the notion of random process, and so on. Frequentist inference with random sets can be envisaged to better describe common situations such as lack of data and set-valued observations. To this aim, parameterised families of random sets (and Gaussian random sets in particular) are a crucial area of investigation. In particular, we will present some recent work on the generalisation of the notion of likelihood, as the basis for a generalised logistic regression framework capable to better estimate rare events; a random set-version of maximum-entropy classifiers; and a recent generalisation of the law of total probability to belief functions. In a longer-term perspective, random set theory can be instrumental to new robust foundations for statistical machine learning allowing the formulation of models and algorithms able to deal with mission-critical applications ‘in the wild’, in a mutual beneficial exchange between statistics and artificial intelligence."},"translated_abstract":"Random set theory, originally born within the remit of mathematical statistics, lies nowadays at the interface of statistics and AI. Arguably more mathematically complex than standard probability, the field is now facing open issues such as the formulation of generalised laws of probability, the generalisation of the notion of random variable to random set spaces, the extension of the notion of random process, and so on. Frequentist inference with random sets can be envisaged to better describe common situations such as lack of data and set-valued observations. To this aim, parameterised families of random sets (and Gaussian random sets in particular) are a crucial area of investigation. In particular, we will present some recent work on the generalisation of the notion of likelihood, as the basis for a generalised logistic regression framework capable to better estimate rare events; a random set-version of maximum-entropy classifiers; and a recent generalisation of the law of total probability to belief functions. 
In a longer-term perspective, random set theory can be instrumental to new robust foundations for statistical machine learning allowing the formulation of models and algorithms able to deal with mission-critical applications ‘in the wild’, in a mutual beneficial exchange between statistics and artificial intelligence.","internal_url":"https://www.academia.edu/37181245/Random_sets_at_the_interface_of_statistics_and_AI_Fifth_Bayesian_Fiducial_and_Frequentist_BFF5_Conference","translated_internal_url":"","created_at":"2018-08-04T14:37:12.151-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":366407,"coauthors_can_edit":true,"document_type":"talk","co_author_tags":[],"downloadable_attachments":[{"id":57132604,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/57132604/thumbnails/1.jpg","file_name":"BFF5.pdf","download_url":"https://www.academia.edu/attachments/57132604/download_file","bulk_download_file_name":"Random_sets_at_the_interface_of_statisti.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/57132604/BFF5-libre.pdf?1533419766=\u0026response-content-disposition=attachment%3B+filename%3DRandom_sets_at_the_interface_of_statisti.pdf\u0026Expires=1743344605\u0026Signature=EhuYZ8i0itNuiQClUhUx7nwjdmFPlrnXBjX6vUJcwpCT~~YIuk3TvfpQSnIWxZyrTdDZ2ODqQQ1t7IPL963hi-kN~08I8hbHlbti68YZ51RDaPNhgbacsYnFRC3HR4s0uo59DsUdykFsvF4W9xi-o2QgHKQz1Yx1FBwt2q0xjm17RGI~XsYx0ISYqM8-uAD1pjCMnLQ2yPfbY1FuBmxjcan7duOPgdRSrj5Z9y2uORSR9k~FJ-2ZxTvVcyuaqn5E2nvdjqljoZNVSZLebjnwyHlpVxoPEBwBFn~SyTGfTL4AdznUTI81BSGbtE1N0xbWbpT9F9JizRXvO86t1qrB2Q__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"slug":"Random_sets_at_the_interface_of_statistics_and_AI_Fifth_Bayesian_Fiducial_and_Frequentist_BFF5_Conference","translated_slug":"","page_count":31,"language":"en","content_type":"Work","summary":"Random set theory, originally born within the remit of mathematical statistics, lies nowadays at the interface of statistics and AI. Arguably more mathematically complex than standard probability, the field is now facing open issues such as the formulation of generalised laws of probability, the generalisation of the notion of random variable to random set spaces, the extension of the notion of random process, and so on. Frequentist inference with random sets can be envisaged to better describe common situations such as lack of data and set-valued observations. To this aim, parameterised families of random sets (and Gaussian random sets in particular) are a crucial area of investigation. In particular, we will present some recent work on the generalisation of the notion of likelihood, as the basis for a generalised logistic regression framework capable to better estimate rare events; a random set-version of maximum-entropy classifiers; and a recent generalisation of the law of total probability to belief functions. 
In a longer-term perspective, random set theory can be instrumental to new robust foundations for statistical machine learning allowing the formulation of models and algorithms able to deal with mission-critical applications ‘in the wild’, in a mutual beneficial exchange between statistics and artificial intelligence.","owner":{"id":366407,"first_name":"Fabio","middle_initials":null,"last_name":"Cuzzolin","page_name":"FabioCuzzolin","domain_name":"oxfordbrookes","created_at":"2011-03-17T00:43:31.195-07:00","display_name":"Fabio Cuzzolin","url":"https://oxfordbrookes.academia.edu/FabioCuzzolin"},"attachments":[{"id":57132604,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/57132604/thumbnails/1.jpg","file_name":"BFF5.pdf","download_url":"https://www.academia.edu/attachments/57132604/download_file","bulk_download_file_name":"Random_sets_at_the_interface_of_statisti.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/57132604/BFF5-libre.pdf?1533419766=\u0026response-content-disposition=attachment%3B+filename%3DRandom_sets_at_the_interface_of_statisti.pdf\u0026Expires=1743344605\u0026Signature=EhuYZ8i0itNuiQClUhUx7nwjdmFPlrnXBjX6vUJcwpCT~~YIuk3TvfpQSnIWxZyrTdDZ2ODqQQ1t7IPL963hi-kN~08I8hbHlbti68YZ51RDaPNhgbacsYnFRC3HR4s0uo59DsUdykFsvF4W9xi-o2QgHKQz1Yx1FBwt2q0xjm17RGI~XsYx0ISYqM8-uAD1pjCMnLQ2yPfbY1FuBmxjcan7duOPgdRSrj5Z9y2uORSR9k~FJ-2ZxTvVcyuaqn5E2nvdjqljoZNVSZLebjnwyHlpVxoPEBwBFn~SyTGfTL4AdznUTI81BSGbtE1N0xbWbpT9F9JizRXvO86t1qrB2Q__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"research_interests":[{"id":344,"name":"Probability Theory","url":"https://www.academia.edu/Documents/in/Probability_Theory"},{"id":465,"name":"Artificial Intelligence","url":"https://www.academia.edu/Documents/in/Artificial_Intelligence"},{"id":892,"name":"Statistics","url":"https://www.academia.edu/Documents/in/Statistics"},{"id":6404,"name":"Reasoning about Uncertainty","url":"https://www.academia.edu/Documents/in/Reasoning_about_Uncertainty"},{"id":16097,"name":"Decision Making Under Uncertainty","url":"https://www.academia.edu/Documents/in/Decision_Making_Under_Uncertainty"},{"id":22613,"name":"Probability and statistics","url":"https://www.academia.edu/Documents/in/Probability_and_statistics"},{"id":33069,"name":"Probability","url":"https://www.academia.edu/Documents/in/Probability"},{"id":61603,"name":"Uncertainty","url":"https://www.academia.edu/Documents/in/Uncertainty"},{"id":94225,"name":"Belief Functions","url":"https://www.academia.edu/Documents/in/Belief_Functions"},{"id":141502,"name":"Inteligencia artificial","url":"https://www.academia.edu/Documents/in/Inteligencia_artificial-1"},{"id":492766,"name":"Random Sets","url":"https://www.academia.edu/Documents/in/Random_Sets"}],"urls":[]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") if (false) { Aedu.setUpFigureCarousel('profile-work-37181245-figures'); } }); </script> <div class="js-work-strip profile--work_container" data-work-id="36221243"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/36221243/Disruptive_Visual_AI_A_BMW_Knowledge_Day_presentation"><img alt="Research paper thumbnail of Disruptive Visual AI - A BMW Knowledge Day presentation" class="work-thumbnail" src="https://attachments.academia-assets.com/56123982/thumbnails/1.jpg" /></a></div><div class="wp-workCard 
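As background for the terminology in this abstract (masses, belief, plausibility), the block below recalls the standard Dempster-Shafer definitions and the classical law of total probability that the talk proposes to generalise. This is textbook material included for orientation only; it is not a statement of the talk's new results.

```latex
\documentclass{article}
\usepackage{amsmath,amssymb}
\begin{document}
Let $\Theta$ be a finite frame of discernment. A \emph{mass function}
(basic probability assignment) is a map $m : 2^\Theta \to [0,1]$ with
$m(\emptyset) = 0$ and $\sum_{A \subseteq \Theta} m(A) = 1$. It induces
\emph{belief} and \emph{plausibility} measures
\[
  \operatorname{Bel}(A) = \sum_{B \subseteq A} m(B),
  \qquad
  \operatorname{Pl}(A) = \sum_{B \cap A \neq \emptyset} m(B)
  = 1 - \operatorname{Bel}(\bar{A}),
\]
so that $\operatorname{Bel}(A) \le \operatorname{Pl}(A)$ for every event $A$.
The classical law of total probability being generalised reads, for a
partition $\{B_1, \dots, B_n\}$ of $\Theta$,
\[
  P(A) = \sum_{i=1}^{n} P(A \mid B_i)\, P(B_i).
\]
\end{document}
```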
wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/36221243/Disruptive_Visual_AI_A_BMW_Knowledge_Day_presentation">Disruptive Visual AI - A BMW Knowledge Day presentation</a></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="994c56422fdc910f00e52251d1fd74fe" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":56123982,"asset_id":36221243,"asset_type":"Work","button_location":"profile"}" href="https://www.academia.edu/attachments/56123982/download_file?s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="36221243"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="36221243"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 36221243; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=36221243]").text(description); $(".js-view-count[data-work-id=36221243]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 36221243; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='36221243']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-a9bf3a2bc8c89fa2a77156577594264ee8a0f214d74241bc0fcd3f69f8d107ac.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (true){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "994c56422fdc910f00e52251d1fd74fe" } } $('.js-work-strip[data-work-id=36221243]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":36221243,"title":"Disruptive Visual AI - A BMW Knowledge Day presentation","translated_title":"","metadata":{"ai_abstract":"The presentation on \"Disruptive Visual AI\" at the BMW Knowledge Day highlights the potential of artificial intelligence in factory settings, particularly through the use of video monitoring to proactively detect anomalies and communicate warnings to workers in natural language. 
This technology aims to enhance safety and operational efficiency by providing real-time feedback and alerts in a comprehensible manner.","ai_title_tag":"Visual AI for Safety in Factory Settings"},"translated_abstract":null,"internal_url":"https://www.academia.edu/36221243/Disruptive_Visual_AI_A_BMW_Knowledge_Day_presentation","translated_internal_url":"","created_at":"2018-03-21T11:45:42.697-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":366407,"coauthors_can_edit":true,"document_type":"talk","co_author_tags":[{"id":31221436,"work_id":36221243,"tagging_user_id":366407,"tagged_user_id":null,"co_author_invite_id":5034152,"email":"f***n@gmail.com","display_order":1,"name":"Fabio Cuzzolin","title":"Disruptive Visual AI - A BMW Knowledge Day presentation"}],"downloadable_attachments":[{"id":56123982,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/56123982/thumbnails/1.jpg","file_name":"BMW_Knowledge_Day.pdf","download_url":"https://www.academia.edu/attachments/56123982/download_file","bulk_download_file_name":"Disruptive_Visual_AI_A_BMW_Knowledge_Day.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/56123982/BMW_Knowledge_Day-libre.pdf?1521659058=\u0026response-content-disposition=attachment%3B+filename%3DDisruptive_Visual_AI_A_BMW_Knowledge_Day.pdf\u0026Expires=1743078337\u0026Signature=aO2KuVDA5gIBzCgNfcqeNduf9SinyXCeLxoFv34rAfLvbMssTlTn3g86aHnVfVl0cnVDrnSTmqbfE28Z6M81mloNhyt4M~Nw5PbeTD8B5URnKworFaZxY~BW-Rbi9aLdtu-XvM7Wgvwmg3B0hCXwPggXIjgbk7ZGF2GDCAJAFa98kAtsrnBIWWx20S2FzxqdHJLntt16PrwQcQvtITKyuP6mDsVfWp1v-UqOhgLvCf1WIb3Grnl61pb4CRm47H-SEBcQnDhrJeB4GWl9tk1m1WDwgb-2S8OuidgPnF3kDs-URs3g2a6QXSt54alv~SodFgy3nnE-VwKlDHrJABOB6A__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"slug":"Disruptive_Visual_AI_A_BMW_Knowledge_Day_presentation","translated_slug":"","page_count":35,"language":"en","content_type":"Work","summary":null,"owner":{"id":366407,"first_name":"Fabio","middle_initials":null,"last_name":"Cuzzolin","page_name":"FabioCuzzolin","domain_name":"oxfordbrookes","created_at":"2011-03-17T00:43:31.195-07:00","display_name":"Fabio Cuzzolin","url":"https://oxfordbrookes.academia.edu/FabioCuzzolin"},"attachments":[{"id":56123982,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/56123982/thumbnails/1.jpg","file_name":"BMW_Knowledge_Day.pdf","download_url":"https://www.academia.edu/attachments/56123982/download_file","bulk_download_file_name":"Disruptive_Visual_AI_A_BMW_Knowledge_Day.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/56123982/BMW_Knowledge_Day-libre.pdf?1521659058=\u0026response-content-disposition=attachment%3B+filename%3DDisruptive_Visual_AI_A_BMW_Knowledge_Day.pdf\u0026Expires=1743078337\u0026Signature=aO2KuVDA5gIBzCgNfcqeNduf9SinyXCeLxoFv34rAfLvbMssTlTn3g86aHnVfVl0cnVDrnSTmqbfE28Z6M81mloNhyt4M~Nw5PbeTD8B5URnKworFaZxY~BW-Rbi9aLdtu-XvM7Wgvwmg3B0hCXwPggXIjgbk7ZGF2GDCAJAFa98kAtsrnBIWWx20S2FzxqdHJLntt16PrwQcQvtITKyuP6mDsVfWp1v-UqOhgLvCf1WIb3Grnl61pb4CRm47H-SEBcQnDhrJeB4GWl9tk1m1WDwgb-2S8OuidgPnF3kDs-URs3g2a6QXSt54alv~SodFgy3nnE-VwKlDHrJABOB6A__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"research_interests":[{"id":465,"name":"Artificial Intelligence","url":"https://www.academia.edu/Documents/in/Artificial_Intelligence"},{"id":854,"name":"Computer Vision","url":"https://www.academia.edu/Documents/in/Computer_Vision"},{"id":36449,"name":"Activity 
Recognition","url":"https://www.academia.edu/Documents/in/Activity_Recognition"},{"id":54123,"name":"Artificial Neural Networks","url":"https://www.academia.edu/Documents/in/Artificial_Neural_Networks"},{"id":57238,"name":"Human Activity Recognition","url":"https://www.academia.edu/Documents/in/Human_Activity_Recognition"},{"id":73673,"name":"Video Surveillance","url":"https://www.academia.edu/Documents/in/Video_Surveillance"},{"id":81182,"name":"Deep Learning","url":"https://www.academia.edu/Documents/in/Deep_Learning"},{"id":610496,"name":"Security and Surveillance","url":"https://www.academia.edu/Documents/in/Security_and_Surveillance"}],"urls":[]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") }); </script> <div class="js-work-strip profile--work_container" data-work-id="36221234"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/36221234/New_trends_in_AI_for_visual_recognition_and_prediction"><img alt="Research paper thumbnail of New trends in AI for visual recognition and prediction" class="work-thumbnail" src="https://attachments.academia-assets.com/56123973/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/36221234/New_trends_in_AI_for_visual_recognition_and_prediction">New trends in AI for visual recognition and prediction</a></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="273de297bb69747bd3b3672aade6a0c8" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":56123973,"asset_id":36221234,"asset_type":"Work","button_location":"profile"}" href="https://www.academia.edu/attachments/56123973/download_file?s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="36221234"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="36221234"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 36221234; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=36221234]").text(description); $(".js-view-count[data-work-id=36221234]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 36221234; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='36221234']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); 
container.find('.percentile-widget').removeClass('hidden'); }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-a9bf3a2bc8c89fa2a77156577594264ee8a0f214d74241bc0fcd3f69f8d107ac.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (true){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "273de297bb69747bd3b3672aade6a0c8" } } $('.js-work-strip[data-work-id=36221234]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":36221234,"title":"New trends in AI for visual recognition and prediction","translated_title":"","metadata":{"ai_abstract":"The paper discusses the integration of deep learning techniques in the online recognition of complex activities, which are defined as sequences of simpler actions. It highlights a shift from traditional generative models to a discriminative approach using deep learning architectures, proposing a novel framework that identifies and classifies action instances as \"action tubes\" within video data. The authors emphasize the superiority of their framework over established methods, demonstrating significant improvements in recognition rates.","ai_title_tag":"Deep Learning for Action Recognition in Videos"},"translated_abstract":null,"internal_url":"https://www.academia.edu/36221234/New_trends_in_AI_for_visual_recognition_and_prediction","translated_internal_url":"","created_at":"2018-03-21T11:43:54.168-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":366407,"coauthors_can_edit":true,"document_type":"talk","co_author_tags":[{"id":31221430,"work_id":36221234,"tagging_user_id":366407,"tagged_user_id":null,"co_author_invite_id":5034152,"email":"f***n@gmail.com","display_order":1,"name":"Fabio Cuzzolin","title":"New trends in AI for visual recognition and 
prediction"}],"downloadable_attachments":[{"id":56123973,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/56123973/thumbnails/1.jpg","file_name":"New_trends_in_AI_for_visual_recognition_and_prediction.pdf","download_url":"https://www.academia.edu/attachments/56123973/download_file","bulk_download_file_name":"New_trends_in_AI_for_visual_recognition.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/56123973/New_trends_in_AI_for_visual_recognition_and_prediction-libre.pdf?1521659059=\u0026response-content-disposition=attachment%3B+filename%3DNew_trends_in_AI_for_visual_recognition.pdf\u0026Expires=1743273936\u0026Signature=K6~LoSuBsMkiQnhU9AprmLIHaSOCXELQ8vAdbFAGXqpzKmWCD31VZBDAAvf55eufxLznx0~a314OZJZ0MsE3z5qt94qlLEXYrjWT~6vTXAqZ7LEzuzXDjApL3nL0aK75jHd3z38lVJNdHWgklCp1rqN1MWo6q1N8To1BSEAAuSZO3KFVW7Vb2vO7EqhPf89jRSKzPtoTGS5heRohgYROzOKgGGgGhM4H-HJazI-F~j3pSmIvPFdjpkOY06acfR5a7Px64pNLU66ZbCAIn2lrOsdHvZ1-X0L21iRFZGZLZpRGUZ3vc9cOeD0tri-epF5NLA5xr0JaHIpHCbqsglsSWA__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"slug":"New_trends_in_AI_for_visual_recognition_and_prediction","translated_slug":"","page_count":12,"language":"en","content_type":"Work","summary":null,"owner":{"id":366407,"first_name":"Fabio","middle_initials":null,"last_name":"Cuzzolin","page_name":"FabioCuzzolin","domain_name":"oxfordbrookes","created_at":"2011-03-17T00:43:31.195-07:00","display_name":"Fabio Cuzzolin","url":"https://oxfordbrookes.academia.edu/FabioCuzzolin","email":"YWVwZFYxYVJ1R0NZa3d4WVVIYjJocktDazBaNjBIbXJ3WGNEVTZXNkhoUGVYKzhseC9tSG4zVWVjRTk4ZXVNQS0tVHhNYklJR254NkRybzk3VUVxbCsrZz09--5c92499a631978bca2a33ce5ae9d4f37e37d386e"},"attachments":[{"id":56123973,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/56123973/thumbnails/1.jpg","file_name":"New_trends_in_AI_for_visual_recognition_and_prediction.pdf","download_url":"https://www.academia.edu/attachments/56123973/download_file","bulk_download_file_name":"New_trends_in_AI_for_visual_recognition.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/56123973/New_trends_in_AI_for_visual_recognition_and_prediction-libre.pdf?1521659059=\u0026response-content-disposition=attachment%3B+filename%3DNew_trends_in_AI_for_visual_recognition.pdf\u0026Expires=1743273936\u0026Signature=K6~LoSuBsMkiQnhU9AprmLIHaSOCXELQ8vAdbFAGXqpzKmWCD31VZBDAAvf55eufxLznx0~a314OZJZ0MsE3z5qt94qlLEXYrjWT~6vTXAqZ7LEzuzXDjApL3nL0aK75jHd3z38lVJNdHWgklCp1rqN1MWo6q1N8To1BSEAAuSZO3KFVW7Vb2vO7EqhPf89jRSKzPtoTGS5heRohgYROzOKgGGgGhM4H-HJazI-F~j3pSmIvPFdjpkOY06acfR5a7Px64pNLU66ZbCAIn2lrOsdHvZ1-X0L21iRFZGZLZpRGUZ3vc9cOeD0tri-epF5NLA5xr0JaHIpHCbqsglsSWA__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"research_interests":[{"id":465,"name":"Artificial Intelligence","url":"https://www.academia.edu/Documents/in/Artificial_Intelligence"},{"id":854,"name":"Computer Vision","url":"https://www.academia.edu/Documents/in/Computer_Vision"},{"id":37840,"name":"da Vinci Robotic Surgical System","url":"https://www.academia.edu/Documents/in/da_Vinci_Robotic_Surgical_System"},{"id":42630,"name":"Surgical Robotics","url":"https://www.academia.edu/Documents/in/Surgical_Robotics"}],"urls":[]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") if (false) { Aedu.setUpFigureCarousel('profile-work-36221234-figures'); } }); </script> <div class="js-work-strip profile--work_container" data-work-id="36221084"><div class="profile--work_thumbnail 
hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/36221084/A_theory_of_mind_for_visual_AIs"><img alt="Research paper thumbnail of A theory of mind for visual AIs" class="work-thumbnail" src="https://attachments.academia-assets.com/56123790/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/36221084/A_theory_of_mind_for_visual_AIs">A theory of mind for visual AIs</a></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">Artificial intelligence is becoming part of our lives. Smart cars will engage our roads in less t...</span><a class="js-work-more-abstract" data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">Artificial intelligence is becoming part of our lives. Smart cars will engage our roads in less than ten years’ time; shops with no checkout, which automatically recognise customers and what they purchase, are already open for business. But to enable machines to deal with uncertainty, we must fundamentally change the way machines learn from the data they observe so that they will be able to cope with situations they have never encountered in the safest possible way. Interacting naturally with human beings and their complex environments will only be possible if machines are able to put themselves in people’s shoes: to guess their goals, beliefs and intentions – in other words, to read our minds.</span></div><div class="wp-workCard_item"><div class="carousel-container carousel-container--sm" id="profile-work-36221084-figures"><div class="prev-slide-container js-prev-button-container"><button aria-label="Previous" class="carousel-navigation-button js-profile-work-36221084-figures-prev"><span class="material-symbols-outlined" style="font-size: 24px" translate="no">arrow_back_ios</span></button></div><div class="slides-container js-slides-container"><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575076/figure-2-theory-of-mind-for-visual-ais"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_002.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575086/figure-3-theory-of-mind-for-visual-ais"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_003.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575116/figure-4-theory-of-mind-for-visual-ais"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_004.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575052/figure-1-theory-of-mind-for-visual-ais"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_001.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575129/figure-5-professor-fabio-cuzzolin-explores-how-intelligent"><img alt="Professor Fabio Cuzzolin explores how intelligent machines can negotiate a complex world. 
fraught with uncertainty. To enable machines to deal with situations they have never encountered in the safest possible way. Interacting naturally with human beings and their complex environments will only be possible if machines are able to put themselves in people's shoes: to guess their goals, beliefs and intentions — in other words, to read our minds. " class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_005.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575136/figure-6-theory-of-mind-for-visual-ais"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_006.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575145/figure-7-theory-of-mind-for-visual-ais"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_007.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575158/figure-8-introducing-amazon-go-and-the-worlds-most-advanced"><img alt="Introducing Amazon Go and the world’s most advanced shopping technology " class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_008.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575169/figure-9-to-operating-with-the-assistance-of-robo-saras"><img alt="... to operating with the assistance of a robo’ [SARAS Horizon 2020 project] " class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_009.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575180/figure-10-theory-of-mind-for-visual-ais"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_010.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575197/figure-11-theory-of-mind-for-visual-ais"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_011.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575208/figure-12-theory-of-mind-for-visual-ais"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_012.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575216/figure-13-theory-of-mind-for-visual-ais"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_013.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575227/figure-14-the-issue-is-really-fundamental-one-with-the-way"><img alt="The issue is really a fundamental one with the way machine learning works! " class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_014.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575233/figure-15-theory-of-mind-for-visual-ais"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_015.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575244/figure-16-allows-machines-to-play-against-each-other-and"><img alt="Allows machines to play against each other and lear Potentially super-human results! (e.g. 
AlphaGo) " class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_016.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575252/figure-17-theory-of-mind-for-visual-ais"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_017.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575260/figure-18-theory-of-mind-for-visual-ais"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_018.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575264/figure-19-fully-connected-layers-at-the-end-produce-softmax"><img alt="fully connected layers at the end produce softmax sc for classification " class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_019.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575360/figure-20-theory-of-mind-for-visual-ais"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_020.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575383/figure-21-theory-of-mind-for-visual-ais"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_021.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575401/figure-22-theory-of-mind-for-visual-ais"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_022.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575418/figure-23-dominant-paradigm-right-now-linking-up-these"><img alt="Dominant paradigm right now: linking up these detections in time " class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_023.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575431/figure-24-online-action-tube-generation-incrementally-grows"><img alt="Online action tube generation: incrementally grows multiple tubes, for each action, over time by adding one box at a tim Temporal trimming using an online Viterbi approach " class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_024.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575451/figure-25-theory-of-mind-for-visual-ais"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_025.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575470/figure-4-online-action-localisation-results-using-the-auc"><img alt="Figure 4. Online action localisation results using the AUC (% metric on J-HMDB-21, at IoU thresholds of 5 = 0.2, 0.5. 
" class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_026.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575485/figure-27-cool-application-analysing-footage-of-football"><img alt="Cool application: analysing footage of football game: Predicting how a play will end [Sportslate, Disney] " class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_027.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575512/figure-28-we-are-annotating-the-oxford-robotcar-dataset-with"><img alt="We are annotating the Oxford RobotCar dataset with bounding boxes and labels for events and activities " class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_028.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575522/figure-29-theory-of-mind-for-visual-ais"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_029.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575535/figure-30-bilinear-interpolation-is-also-used-to-provide"><img alt="Bilinear interpolation is also used to provide a fixed-size representation for proposals " class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_030.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575549/figure-31-theory-of-mind-for-visual-ais"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_031.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575569/figure-32-theory-of-mind-for-visual-ais"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_032.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575594/figure-33-eing-aware-of-whats-happening-is-not-enough"><img alt="}eing aware of what’s happening is not enough " class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_033.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575607/figure-34-can-make-predictions-if-know-where-what-observe"><img alt="can make predictions if | know where what | observe s placed in the bigger picture lf | see a person opening the fridge, | can predict soon they will be chopping stuff on the counter " class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_034.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575612/figure-35-jur-idea-discriminative-deep-learning-approach"><img alt="Jur idea: ‘discriminative’ deep learning approach Architecture | proposed for recent Amazon and Google resear awards, and | am proposing for H2020 SARAS by Girshick et al’s DPM-CNN object recognition mod | - ee. 
C - Eee | Inspired " class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_035.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575618/figure-36-theory-of-mind-for-visual-ais"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_036.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575628/figure-37-theory-of-mind-for-visual-ais"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_037.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575642/figure-38-ability-to-think-with-somebody-elses-head"><img alt="Ability to think with somebody else’s head " class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_038.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575646/figure-39-theory-of-mind-for-visual-ais"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_039.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575658/figure-40-theory-of-mind-for-visual-ais"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_040.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575667/figure-41-theory-of-mind-for-visual-ais"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_041.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575673/figure-42-theory-of-mind-for-visual-ais"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_042.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575680/figure-43-is-this-what-mean-by-mind-reading-machines"><img alt="Is this what | mean by mind-reading machines? 
" class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_043.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575688/figure-44-theory-of-mind-for-visual-ais"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_044.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575701/figure-45-theory-of-mind-for-visual-ais"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_045.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575717/figure-46-theory-of-mind-for-visual-ais"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_046.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575737/figure-47-modest-proposal-inspired-by-westras-hierarchical"><img alt="A modest proposal, inspired by Westra’s Hierarchical Predictive Coding (HPC) [EPSRC Fellowship] " class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_047.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575744/figure-48-theory-of-mind-for-visual-ais"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_048.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575751/figure-49-theory-of-mind-for-visual-ais"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_049.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575763/figure-50-theory-of-mind-for-visual-ais"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_050.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575773/figure-51-conventional-wisdom-wants-that-smoothing-the"><img alt="Conventional wisdom wants that smoothing the boundary improves robustness (again from SLT) " class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_051.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575780/figure-52-theory-of-mind-for-visual-ais"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_052.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575782/figure-53-theory-of-mind-for-visual-ais"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_053.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/19575786/figure-54-think-of-really-smart-phone-that-tells-you-what-is"><img alt="» Think of a really smart phone that tells you what is going on or what might happen in its own words [Huawei] " class="figure-slide-image" src="https://figures.academia-assets.com/56123790/figure_054.jpg" /></a></figure></div><div class="next-slide-container js-next-button-container"><button aria-label="Next" class="carousel-navigation-button js-profile-work-36221084-figures-next"><span class="material-symbols-outlined" style="font-size: 24px" translate="no">arrow_forward_ios</span></button></div></div></div><div class="wp-workCard_item 
wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="699ffb918369348ac7ce1ff7ff122e0c" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":56123790,"asset_id":36221084,"asset_type":"Work","button_location":"profile"}" href="https://www.academia.edu/attachments/56123790/download_file?s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="36221084"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="36221084"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 36221084; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=36221084]").text(description); $(".js-view-count[data-work-id=36221084]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 36221084; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='36221084']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-a9bf3a2bc8c89fa2a77156577594264ee8a0f214d74241bc0fcd3f69f8d107ac.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (true){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "699ffb918369348ac7ce1ff7ff122e0c" } } $('.js-work-strip[data-work-id=36221084]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":36221084,"title":"A theory of mind for visual AIs","translated_title":"","metadata":{"abstract":"Artificial intelligence is becoming part of our lives. Smart cars will engage our roads in less than ten years’ time; shops with no checkout, which automatically recognise customers and what they purchase, are already open for business. But to enable machines to deal with uncertainty, we must fundamentally change the way machines learn from the data they observe so that they will be able to cope with situations they have never encountered in the safest possible way. 
Interacting naturally with human beings and their complex environments will only be possible if machines are able to put themselves in people’s shoes: to guess their goals, beliefs and intentions – in other words, to read our minds."},"translated_abstract":"Artificial intelligence is becoming part of our lives. Smart cars will engage our roads in less than ten years’ time; shops with no checkout, which automatically recognise customers and what they purchase, are already open for business. But to enable machines to deal with uncertainty, we must fundamentally change the way machines learn from the data they observe so that they will be able to cope with situations they have never encountered in the safest possible way. Interacting naturally with human beings and their complex environments will only be possible if machines are able to put themselves in people’s shoes: to guess their goals, beliefs and intentions – in other words, to read our minds.","internal_url":"https://www.academia.edu/36221084/A_theory_of_mind_for_visual_AIs","translated_internal_url":"","created_at":"2018-03-21T11:20:36.305-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":366407,"coauthors_can_edit":true,"document_type":"talk","co_author_tags":[{"id":31221356,"work_id":36221084,"tagging_user_id":366407,"tagged_user_id":null,"co_author_invite_id":5034152,"email":"f***n@gmail.com","display_order":1,"name":"Fabio Cuzzolin","title":"A theory of mind for visual AIs"}],"downloadable_attachments":[{"id":56123790,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/56123790/thumbnails/1.jpg","file_name":"oxford.pdf","download_url":"https://www.academia.edu/attachments/56123790/download_file","bulk_download_file_name":"A_theory_of_mind_for_visual_AIs.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/56123790/oxford-libre.pdf?1521657302=\u0026response-content-disposition=attachment%3B+filename%3DA_theory_of_mind_for_visual_AIs.pdf\u0026Expires=1743344606\u0026Signature=K~vzgP88vNuW~NxGfCeEphSjxmLzwHrKndJIvVHKZaAxkrh3KlaKfYmahLj8EglvS2z0IbPrVGHJGp8mjTRdw1i-HA4bHEVTOkV1bdegTFxB9kjOal3gafkEJ~DVzggf9ZnkN~EpeUDx4hDdH8DxTpL9LlXhMsZr~RTIiu-cDWRtiTxZLEWeiOOl~RzL52KlQT7deOYOd~yidkx7lIp59D8QXBpNi3UBrTMVw9wKaEacA3r5Re0vgPqTBkwOFSgu8tRJJCYLFl1ZCTghuPF3NdF-e9puh9sx517rv4J04qN77MWiGZqv88vKKztczX1ic13c5QTv2xrXRJZ1JwB52A__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"slug":"A_theory_of_mind_for_visual_AIs","translated_slug":"","page_count":71,"language":"en","content_type":"Work","summary":"Artificial intelligence is becoming part of our lives. Smart cars will engage our roads in less than ten years’ time; shops with no checkout, which automatically recognise customers and what they purchase, are already open for business. But to enable machines to deal with uncertainty, we must fundamentally change the way machines learn from the data they observe so that they will be able to cope with situations they have never encountered in the safest possible way. 
Interacting naturally with human beings and their complex environments will only be possible if machines are able to put themselves in people’s shoes: to guess their goals, beliefs and intentions – in other words, to read our minds.","owner":{"id":366407,"first_name":"Fabio","middle_initials":null,"last_name":"Cuzzolin","page_name":"FabioCuzzolin","domain_name":"oxfordbrookes","created_at":"2011-03-17T00:43:31.195-07:00","display_name":"Fabio Cuzzolin","url":"https://oxfordbrookes.academia.edu/FabioCuzzolin"},"attachments":[{"id":56123790,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/56123790/thumbnails/1.jpg","file_name":"oxford.pdf","download_url":"https://www.academia.edu/attachments/56123790/download_file","bulk_download_file_name":"A_theory_of_mind_for_visual_AIs.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/56123790/oxford-libre.pdf?1521657302=\u0026response-content-disposition=attachment%3B+filename%3DA_theory_of_mind_for_visual_AIs.pdf\u0026Expires=1743344606\u0026Signature=K~vzgP88vNuW~NxGfCeEphSjxmLzwHrKndJIvVHKZaAxkrh3KlaKfYmahLj8EglvS2z0IbPrVGHJGp8mjTRdw1i-HA4bHEVTOkV1bdegTFxB9kjOal3gafkEJ~DVzggf9ZnkN~EpeUDx4hDdH8DxTpL9LlXhMsZr~RTIiu-cDWRtiTxZLEWeiOOl~RzL52KlQT7deOYOd~yidkx7lIp59D8QXBpNi3UBrTMVw9wKaEacA3r5Re0vgPqTBkwOFSgu8tRJJCYLFl1ZCTghuPF3NdF-e9puh9sx517rv4J04qN77MWiGZqv88vKKztczX1ic13c5QTv2xrXRJZ1JwB52A__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"research_interests":[{"id":465,"name":"Artificial Intelligence","url":"https://www.academia.edu/Documents/in/Artificial_Intelligence"},{"id":854,"name":"Computer Vision","url":"https://www.academia.edu/Documents/in/Computer_Vision"},{"id":2008,"name":"Machine Learning","url":"https://www.academia.edu/Documents/in/Machine_Learning"},{"id":4937,"name":"Theory of Mind","url":"https://www.academia.edu/Documents/in/Theory_of_Mind"},{"id":15084,"name":"Statistical machine learning","url":"https://www.academia.edu/Documents/in/Statistical_machine_learning"},{"id":17701,"name":"Gesture Recognition","url":"https://www.academia.edu/Documents/in/Gesture_Recognition"},{"id":20053,"name":"Theory of Mind (Psychology)","url":"https://www.academia.edu/Documents/in/Theory_of_Mind_Psychology_"},{"id":54123,"name":"Artificial Neural Networks","url":"https://www.academia.edu/Documents/in/Artificial_Neural_Networks"},{"id":90270,"name":"Action Recognition","url":"https://www.academia.edu/Documents/in/Action_Recognition"},{"id":142848,"name":"Theory of Mind (ToM) / Empathy / Emotion Recognition.","url":"https://www.academia.edu/Documents/in/Theory_of_Mind_ToM_Empathy_Emotion_Recognition"},{"id":315678,"name":"Human Action Recognition","url":"https://www.academia.edu/Documents/in/Human_Action_Recognition"},{"id":1564682,"name":"Artificlai intelligence","url":"https://www.academia.edu/Documents/in/Artificlai_intelligence"}],"urls":[]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") if (true) { Aedu.setUpFigureCarousel('profile-work-36221084-figures'); } }); </script> <div class="js-work-strip profile--work_container" data-work-id="27105078"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/27105078/Belief_functions_past_present_and_future"><img alt="Research paper thumbnail of Belief functions: past, present and future" class="work-thumbnail" 
src="https://attachments.academia-assets.com/47355851/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/27105078/Belief_functions_past_present_and_future">Belief functions: past, present and future</a></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">The theory of belief functions, sometimes referred to as evidence theory or Dempster-Shafer theor...</span><a class="js-work-more-abstract" data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">The theory of belief functions, sometimes referred to as evidence theory or Dempster-Shafer theory, was first introduced by Arthur P. Dempster in the context of statistical inference, to be later developed by Glenn Shafer as a general framework for modelling epistemic uncertainty. Belief theory and the closely related random set theory form a natural framework for modelling situations in which data are missing or scarce: think of extremely rare events such as volcanic eruptions or power plant meltdowns, problems subject to huge uncertainties due to the number and complexity of the factors involved (e.g. climate change), but also the all-important issue with generalisation from small training sets in machine learning. <br /><br />This short talk abstracted from an upcoming half-day tutorial at IJCAI 2016 is designed to introduce to non-experts the principles and rationale of random sets and belief function theory, review its rationale in the context of frequentist and Bayesian interpretations of probability but also in relationship with the other main approaches to non-additive probability, survey the key elements of the methodology and the most recent developments, discuss current trends in both its theory and applications. 
Finally, a research program for the future is outlined, which include a robustification of Vapnik' statistical learning theory for an Artificial Intelligence 'in the wild'.</span></div><div class="wp-workCard_item"><div class="carousel-container carousel-container--sm" id="profile-work-27105078-figures"><div class="prev-slide-container js-prev-button-container"><button aria-label="Previous" class="carousel-navigation-button js-profile-work-27105078-figures-prev"><span class="material-symbols-outlined" style="font-size: 24px" translate="no">arrow_back_ios</span></button></div><div class="slides-container js-slides-container"><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127309/figure-16-belief-functions-past-present-and-future"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_016.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127316/figure-17-belief-functions-past-present-and-future"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_017.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127322/figure-18-belief-functions-past-present-and-future"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_018.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127328/figure-19-belief-functions-past-present-and-future"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_019.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127333/figure-20-belief-functions-past-present-and-future"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_020.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127338/figure-21-they-are-not-confidence-intervals-either"><img alt="» they are not confidence intervals either: confidence intervals are one-dimensional " class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_021.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127342/figure-22-belief-functions-past-present-and-future"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_022.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127194/figure-1-ual-amsterdam-tutor-bellef-functions-for-the"><img alt="UAL 2015 Amsterdam Tutor: I: Bellef Functions for the Working Sclentist SURRENT VERSION OF IJCAT 2016 PDF SLIDES twork " class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_001.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127198/figure-2-belief-functions-past-present-and-future"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_002.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127201/figure-3-belief-functions-past-present-and-future"><img alt="‘Knightian’ Uncertainty " class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_003.jpg" /></a></figure><figure class="figure-slide-container"><a 
href="https://www.academia.edu/figures/42127205/figure-4-belief-functions-past-present-and-future"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_004.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127209/figure-5-belief-functions-past-present-and-future"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_005.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127213/figure-6-see-the-top-face-of-red-die-green-die-and-purple"><img alt="e.g. | see the top face of Red die &), Green die &) and Purple die 3 but, say, | cannot see the outcome of Blue die " class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_006.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127220/figure-7-belief-functions-past-present-and-future"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_007.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127228/figure-8-belief-functions-past-present-and-future"><img alt="Asymptotic happiness " class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_008.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127255/figure-9-belief-functions-past-present-and-future"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_009.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127262/figure-10-investors-prefer-certainty-to-uncertainty-does"><img alt="investors prefer ‘certainty’ to ‘uncertainty’: does ‘certainty’ mean certain outcon of their bets? 
No, only that they think their models can handle ‘known’ (first-order) uncertainty " class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_010.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127270/figure-11-belief-functions-past-present-and-future"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_011.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127279/figure-12-their-combination-using-dempsters-rule-is-defined"><img alt="9 their combination using Dempster’s rule is defined as: Jempster’s combinatior " class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_012.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127286/figure-13-belief-functions-past-present-and-future"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_013.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127294/figure-14-belief-functions-past-present-and-future"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_014.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127301/figure-15-belief-functions-past-present-and-future"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_015.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127349/figure-23-next-section-conditioning-vs-combination"><img alt="@ next section: conditioning vs combination, generalised theorem, propagation " class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_023.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127352/figure-24-under-the-usual-measurability-conditions-the"><img alt="» under the usual measurability conditions, the probability space (U, B(U), «) an the multi-valued mapping I induce a belief function Belo,x on X x O » conditioning Beloxx on 6 yields Belx(.|6) ~ f(-|8) on X — conditioning it on X = gives Belo(-|x) on © " class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_024.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127357/figure-25-random-set-induced-by-the-likelihood-function-this"><img alt="random set induced by the likelihood function @ this ‘robustifies’ the ML estimate, which is a PDF compatible with the inferred BF " class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_025.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127362/figure-26-in-terms-of-belief-and-plausibility-values"><img alt="» in terms of belief and plausibility values, Dempster’s conditioning yields » in terms of belief and plausibility values, Dempster’s conditioning yields Bel(AUB) —Bel(B PI(B)—PI(B\A PI(ANB Belg (A|B) = el AUB) — Bell ) — Ai deri \ ) Pl (A|B) = paging) " class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_026.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127364/figure-27-belief-functions-past-present-and-future"><img alt="" class="figure-slide-image" 
src="https://figures.academia-assets.com/47355851/figure_027.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127371/figure-28-learn-from-the-training-data-an-approximation-of"><img alt=") learn from the training data an approximation 6 of the unknown mapping between each feature space J; and the pose space O " class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_028.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127377/figure-29-belief-functions-past-present-and-future"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_029.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127380/figure-30-belief-functions-past-present-and-future"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_030.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127385/figure-31-dempsters-rule-can-be-generalised-to-gbfs-gaussian"><img alt="® Dempster’s rule can be generalised to GBFs ‘Gaussian’ belief functions Dempster and Liu’s , 1996 " class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_031.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127391/figure-32-klirs-left-and-desterckes-right-partial"><img alt="» Klir’s (left) and Destercke’s (right) partial hierarchies of uncertainty theories \ hierarchy of uncertainty theories " class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_032.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127397/figure-33-belief-functions-past-present-and-future"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_033.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127403/figure-34-belief-functions-past-present-and-future"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_034.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127411/figure-35-rough-sets-and-belief-functions-pawlal-probability"><img alt="Rough sets and belief functions (Pawlal probability P on F = o(@/R) can be extended to 2° using inner measur P.(A) = sup { P(X)|X € o(O/R),XC al = P(apr(A)) " class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_035.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127417/figure-36-belief-functions-past-present-and-future"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_036.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127424/figure-37-special-cases-of-random-closed-intervals-fuzzy-set"><img alt="Special cases of random closed intervals @ a fuzzy set on the real line induces a mapping to a collection of nested intervals, parameterised by the level c " class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_037.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127430/figure-38-belief-functions-past-present-and-future"><img alt="" 
class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_038.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127437/figure-39-robust-estimate-of-rare-events-how-does-this"><img alt="@ robust estimate of rare events: how does this relate to results of classical logi regression? how do we use belief functions to be cautious about rare event predictic when we measure a new observation x we plug it into Bekx(.|, x) and Bekx(.|8, x), and get a lower and an upper belief function on Y note that each belief function is really an envelope of logistic functions " class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_039.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127444/figure-40-given-fixed-probability-on-this-yields"><img alt="given a (fixed) probability on ©, this yields a parameterised family of random sets » take the classical random experiments which generate a given family of distributions .. » .. and generalise the setting (design) to the case of set-valued observations " class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_040.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127453/figure-41-potential-solutions-correspond-to-square-linear"><img alt="@ potential solutions correspond to square linear systems, and form a graph whose noc : : : : ; are linked by linear transformations of columns: e ++ e! = —e + icc & — Nies & @ at each transformation, the most negative component decreases @ general solution based on simpleyx-like ontimisation? @ pictorial representation of the structure of the FEs of a total BF Bel lying in the image of a focal element of Bel of cardinality 3 " class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_041.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127462/figure-42-unfortunate-but-predic-tesla-accident"><img alt="@ unfortunate (but predictable) Tesla accident " class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_042.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127470/figure-43-unable-to-predict-how-system-will-behave-in"><img alt="» unable to predict how a system will behave in a radically new setting (e.g., how does a smart car cope with driving through extreme weather conditions? 
» most systems have no way of detecting whether their underlying assumptions have been violated: they will happily continue to predict and act even on inputs that are completely outside the scope of what they have actually learned » it is imperative to ensure that these algorithms behave predictably “in the wild” " class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_043.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127478/figure-44-belief-functions-past-present-and-future"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_044.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127488/figure-45-belief-functions-past-present-and-future"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_045.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127496/figure-46-alternatively-we-can-adopt-mass-vectors-mp-ac-qj"><img alt="@ alternatively we can adopt mass vectors mp = [1(A), 0 ¢ AC QJ’, living in mass space: M = C/(ma,@ ¢ AC Q) " class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_046.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127503/figure-47-the-pointwise-behavior-of-depends-on-the-notions"><img alt="2 the pointwise behavior of 6 depends on the notions of constant mass locus [Cuzzolin, 2004] and of foci {Fy, x € Q} of a conditional subspace " class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_047.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127508/figure-48-belief-functions-past-present-and-future"><img alt="" class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_048.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127515/figure-49-earth-sciences-telecoms-etc-in-alone-almost-papers"><img alt="» earth sciences, telecoms, etc @ in 2014 alone, almost 1200 papers were published on belief functions @ new applications are gaining ground, beyond sensor fusion or expert system " class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_049.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/42127521/figure-50-shafer-mathematical-theory-of-evidence-shafer"><img alt="G. Shafer. A mathematical theory of evidence. G. Shafer. A mathematical theory of evidence. Princeton University Press, 1976. 
-or Further Reading " class="figure-slide-image" src="https://figures.academia-assets.com/47355851/figure_050.jpg" /></a></figure></div><div class="next-slide-container js-next-button-container"><button aria-label="Next" class="carousel-navigation-button js-profile-work-27105078-figures-next"><span class="material-symbols-outlined" style="font-size: 24px" translate="no">arrow_forward_ios</span></button></div></div></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="917e9b8fef87d20e309e284a2a610dbb" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":47355851,"asset_id":27105078,"asset_type":"Work","button_location":"profile"}" href="https://www.academia.edu/attachments/47355851/download_file?s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="27105078"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="27105078"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 27105078; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=27105078]").text(description); $(".js-view-count[data-work-id=27105078]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 27105078; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='27105078']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-a9bf3a2bc8c89fa2a77156577594264ee8a0f214d74241bc0fcd3f69f8d107ac.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (true){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "917e9b8fef87d20e309e284a2a610dbb" } } $('.js-work-strip[data-work-id=27105078]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":27105078,"title":"Belief functions: past, present and future","translated_title":"","metadata":{"abstract":"The theory of belief functions, sometimes referred to as evidence theory or Dempster-Shafer theory, was first introduced by Arthur P. Dempster in the context of statistical inference, to be later developed by Glenn Shafer as a general framework for modelling epistemic uncertainty. 
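The conditioning formulas on the slides can be checked against the combination sketch above: Dempster conditioning on an event B is simply combination with the 'categorical' mass function that assigns all mass to B. A minimal, illustrative check, reusing dempster_combine, belief, plausibility and the combined mass function m defined earlier; the choice of B is arbitrary.

```python
# Dempster conditioning as combination with the categorical mass m_B = 1 on B.
# Reuses the helpers and the mass function m from the sketch above.

B = frozenset({"volcano", "flood"})
m_cond = dempster_combine(m, {B: 1.0})   # m(.|B) in Dempster's sense

A = frozenset({"volcano"})
lhs = plausibility(m_cond, A)
rhs = plausibility(m, A & B) / plausibility(m, B)
print(abs(lhs - rhs) < 1e-12)            # Pl(A|B) = Pl(A ∩ B) / Pl(B)
```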
Slides: http://cms.brookes.ac.uk/staff/FabioCuzzolin/files/harvard2016v2.pdf
Video: https://www.youtube.com/watch?v=l9XKJKgkURQ
"profile_work_strip") if (true) { Aedu.setUpFigureCarousel('profile-work-27105078-figures'); } }); </script> </div><div class="profile--tab_content_container js-tab-pane tab-pane" data-section-id="5532151" id="drafts"><div class="js-work-strip profile--work_container" data-work-id="71219126"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/71219126/_International_Workshop_on_Continual_Semi_Supervised_Learning_Introduction_Benchmarks_and_Baselines"><img alt="Research paper thumbnail of .International Workshop on Continual Semi-Supervised Learning: Introduction, Benchmarks and Baselines" class="work-thumbnail" src="https://attachments.academia-assets.com/80659657/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/71219126/_International_Workshop_on_Continual_Semi_Supervised_Learning_Introduction_Benchmarks_and_Baselines">.International Workshop on Continual Semi-Supervised Learning: Introduction, Benchmarks and Baselines</a></div><div class="wp-workCard_item"><span>The First International Workshop on Continual Semi-Supervised Learning (CSSL @ IJCAI 2021)</span><span>, 2021</span></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">The aim of this paper is to formalise a new continual semi-supervised learning (CSSL) paradigm, p...</span><a class="js-work-more-abstract" data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">The aim of this paper is to formalise a new continual semi-supervised learning (CSSL) paradigm, proposed to the attention of the machine learning community via the IJCAI 2021 International Workshop on Continual Semi-Supervised Learning (CSSL@IJCAI), with the aim of raising the field’s awareness about this problem and mobilising its effort in this direction. After a formal definition of continual semi-supervised learning and the appropriate training and testing protocols, the paper introduces two new benchmarks specifically designed to assess CSSL on two important computer vision tasks: activity recognition and crowd counting. We describe the Continual Activity Recognition (CAR) and Continual Crowd Counting (CCC) challenges built upon those benchmarks, the baseline models proposed for the challenges, and describe a simple CSSL baseline which consists in applying batch self-training in temporal sessions, for a limited number of rounds. 
The results show that learning from unlabelled data streams is extremely challenging, and stimulate the search for methods that can encode the dynamics of the data stream.</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="fd23af96948573bab1ea6351b46068b0" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":80659657,"asset_id":71219126,"asset_type":"Work","button_location":"profile"}" href="https://www.academia.edu/attachments/80659657/download_file?s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="71219126"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="71219126"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 71219126; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=71219126]").text(description); $(".js-view-count[data-work-id=71219126]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 71219126; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='71219126']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-a9bf3a2bc8c89fa2a77156577594264ee8a0f214d74241bc0fcd3f69f8d107ac.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (true){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "fd23af96948573bab1ea6351b46068b0" } } $('.js-work-strip[data-work-id=71219126]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":71219126,"title":".International Workshop on Continual Semi-Supervised Learning: Introduction, Benchmarks and Baselines","translated_title":"","metadata":{"abstract":"The aim of this paper is to formalise a new continual semi-supervised learning (CSSL) paradigm, proposed to the attention of the machine learning community via the IJCAI 2021 International Workshop on Continual Semi-Supervised Learning (CSSL@IJCAI), with the aim of raising the field’s awareness about this problem and mobilising its effort in this direction. 
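For intuition, here is a minimal sketch of the kind of session-wise batch self-training the baseline performs. The classifier interface (a scikit-learn-style fit/predict_proba), the confidence threshold and the function name are illustrative assumptions, not the paper's exact protocol.

```python
# Minimal sketch of batch self-training over temporal sessions of an
# unlabelled stream, in the spirit of the CSSL baseline described above.
# Assumes a scikit-learn-style classifier and integer-coded labels.

import numpy as np


def self_train_sessions(model, X_labelled, y_labelled, sessions,
                        rounds=3, threshold=0.9):
    """Adapt `model` over a list of unlabelled sessions (feature arrays)."""
    model.fit(X_labelled, y_labelled)
    X_pool, y_pool = X_labelled, y_labelled
    for X_session in sessions:           # temporal sessions arrive in order
        for _ in range(rounds):          # limited number of rounds per session
            proba = model.predict_proba(X_session)
            keep = proba.max(axis=1) >= threshold   # confident pseudo-labels only
            if not keep.any():
                break
            pseudo = model.classes_[proba[keep].argmax(axis=1)]
            X_pool = np.vstack([X_pool, X_session[keep]])
            y_pool = np.concatenate([y_pool, pseudo])
            model.fit(X_pool, y_pool)    # retrain in batch on the grown pool
    return model
```

One reason such a naive scheme can struggle on a drifting stream is that early pseudo-labelling mistakes are fed back into training, which is consistent with the paper's call for methods that model the stream's dynamics.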
Learning","url":"https://www.academia.edu/Documents/in/Deep_Learning"},{"id":90270,"name":"Action Recognition","url":"https://www.academia.edu/Documents/in/Action_Recognition"},{"id":299247,"name":"Crowd Counting","url":"https://www.academia.edu/Documents/in/Crowd_Counting"},{"id":315678,"name":"Human Action Recognition","url":"https://www.academia.edu/Documents/in/Human_Action_Recognition"},{"id":332460,"name":"Continual Learning","url":"https://www.academia.edu/Documents/in/Continual_Learning"}],"urls":[{"id":17562990,"url":"https://cms.brookes.ac.uk/staff/FabioCuzzolin/files/IJCAI_21_CSSL.pdf"}]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") if (false) { Aedu.setUpFigureCarousel('profile-work-71219126-figures'); } }); </script> <div class="js-work-strip profile--work_container" data-work-id="71217276"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/71217276/YOLO_Z_Improving_small_object_detection_in_YOLOv5_for_autonomous_vehicles"><img alt="Research paper thumbnail of YOLO-Z: Improving small object detection in YOLOv5 for autonomous vehicles" class="work-thumbnail" src="https://attachments.academia-assets.com/80658600/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/71217276/YOLO_Z_Improving_small_object_detection_in_YOLOv5_for_autonomous_vehicles">YOLO-Z: Improving small object detection in YOLOv5 for autonomous vehicles</a></div><div class="wp-workCard_item"><span>ICCV 2021 Workshop: The ROAD challenge: Event Detection for Situation Awareness in Autonomous Driving</span></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">As autonomous vehicles and autonomous racing rise in popularity, so does the need for faster and ...</span><a class="js-work-more-abstract" data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">As autonomous vehicles and autonomous racing rise in popularity, so does the need for faster and more accurate detectors. While our naked eyes are able to extract contextual information almost instantly, even from far away, image resolution and computational resources limitations<br />make detecting smaller objects (that is, objects that occupy a small pixel area in the input image) a truly challenging task for machines and a wide open research field. <br /><br />This study explores ways in which the popular YOLOv5 object detector can be modified to improve its performance in detecting smaller objects, with a particular focus on its application to autonomous racing. To achieve this, we investigate how replacing certain structural elements of the<br />model (as well as their connections and other parameters) can affect performance and inference time. 
In doing so, we propose a series of models at different scales, which we name ‘YOLO-Z’, and which display an improvement of up to 6.9% in mAP when detecting smaller objects at 50% IOU, at a cost of just a 3ms increase in inference time compared to the original YOLOv5.<br /><br />Our objective is not only to inform future research on the potential of adjusting a popular detector such as YOLOv5 to address specific tasks, but also to provide insights on how specific changes can impact small object detection. Such findings, applied to the wider context of autonomous vehicles, could increase the amount of contextual information available to such systems.</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="4d68208e9da4b585de52a9ce73597203" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":80658600,"asset_id":71217276,"asset_type":"Work","button_location":"profile"}" href="https://www.academia.edu/attachments/80658600/download_file?s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="71217276"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="71217276"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 71217276; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=71217276]").text(description); $(".js-view-count[data-work-id=71217276]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 71217276; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='71217276']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-a9bf3a2bc8c89fa2a77156577594264ee8a0f214d74241bc0fcd3f69f8d107ac.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (true){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "4d68208e9da4b585de52a9ce73597203" } } $('.js-work-strip[data-work-id=71217276]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":71217276,"title":"YOLO-Z: Improving small object detection in YOLOv5 for autonomous vehicles","translated_title":"","metadata":{"abstract":"As autonomous vehicles 
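For readers unfamiliar with the metric, "mAP at 50% IoU" counts a predicted box as a true positive only when its intersection-over-union with a matching ground-truth box of the same class reaches 0.5; mAP then averages precision over recall levels and classes. A minimal sketch of the IoU computation itself follows; the corner-format (x1, y1, x2, y2) boxes are an assumption, and this is the generic definition, not YOLOv5's implementation.

```python
# Generic intersection-over-union of two axis-aligned boxes (x1, y1, x2, y2).

def iou(box_a, box_b):
    ax1, ay1, ax2, ay2 = box_a
    bx1, by1, bx2, by2 = box_b
    # intersection rectangle (empty if the boxes do not overlap)
    ix1, iy1 = max(ax1, bx1), max(ay1, by1)
    ix2, iy2 = min(ax2, bx2), min(ay2, by2)
    inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)
    area_a = (ax2 - ax1) * (ay2 - ay1)
    area_b = (bx2 - bx1) * (by2 - by1)
    union = area_a + area_b - inter
    return inter / union if union > 0 else 0.0


print(iou((0, 0, 10, 10), (5, 0, 15, 10)))  # 1/3: not a TP at the 0.5 threshold
```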
href="https://www.academia.edu/37182805/Spatio_temporal_Human_Action_Localisation_and_Instance_Segmentation_in_Temporally_Untrimmed_Videos"><img alt="Research paper thumbnail of Spatio-temporal Human Action Localisation and Instance Segmentation in Temporally Untrimmed Videos" class="work-thumbnail" src="https://attachments.academia-assets.com/57134209/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/37182805/Spatio_temporal_Human_Action_Localisation_and_Instance_Segmentation_in_Temporally_Untrimmed_Videos">Spatio-temporal Human Action Localisation and Instance Segmentation in Temporally Untrimmed Videos</a></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">Current state-of-the-art human action recognition is fo-cused on the classification of temporally...</span><a class="js-work-more-abstract" data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">Current state-of-the-art human action recognition is fo-cused on the classification of temporally trimmed videos in which only one action occurs per frame. In this work we address the problem of action localisation and instance segmentation in which multiple concurrent actions of the same class may be segmented out of an image sequence. We cast the action tube extraction as an energy maximisa-tion problem in which configurations of region proposals in each frame are assigned a cost and the best action tubes are selected via two passes of dynamic programming. One pass associates region proposals in space and time for each action category, and another pass is used to solve for the tube's temporal extent and to enforce a smooth label sequence through the video. In addition, by taking advantage of recent work on action foreground-background seg-mentation, we are able to associate each tube with class-specific segmentations. 
We demonstrate the performance of our algorithm on the challenging LIRIS-HARL dataset and achieve a new state-of-the-art result which is 14.3 times better than previous methods.</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="f01716aea1e4f16ba03fada1f5cd246b" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":57134209,"asset_id":37182805,"asset_type":"Work","button_location":"profile"}" href="https://www.academia.edu/attachments/57134209/download_file?s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="37182805"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="37182805"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 37182805; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=37182805]").text(description); $(".js-view-count[data-work-id=37182805]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 37182805; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='37182805']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-a9bf3a2bc8c89fa2a77156577594264ee8a0f214d74241bc0fcd3f69f8d107ac.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (true){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "f01716aea1e4f16ba03fada1f5cd246b" } } $('.js-work-strip[data-work-id=37182805]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":37182805,"title":"Spatio-temporal Human Action Localisation and Instance Segmentation in Temporally Untrimmed Videos","translated_title":"","metadata":{"abstract":"Current state-of-the-art human action recognition is fo-cused on the classification of temporally trimmed videos in which only one action occurs per frame. In this work we address the problem of action localisation and instance segmentation in which multiple concurrent actions of the same class may be segmented out of an image sequence. 
We cast the action tube extraction as an energy maximisa-tion problem in which configurations of region proposals in each frame are assigned a cost and the best action tubes are selected via two passes of dynamic programming. One pass associates region proposals in space and time for each action category, and another pass is used to solve for the tube's temporal extent and to enforce a smooth label sequence through the video. In addition, by taking advantage of recent work on action foreground-background seg-mentation, we are able to associate each tube with class-specific segmentations. We demonstrate the performance of our algorithm on the challenging LIRIS-HARL dataset and achieve a new state-of-the-art result which is 14.3 times better than previous methods.","ai_title_tag":"Action Localization and Segmentation in Untrimmed Videos"},"translated_abstract":"Current state-of-the-art human action recognition is fo-cused on the classification of temporally trimmed videos in which only one action occurs per frame. In this work we address the problem of action localisation and instance segmentation in which multiple concurrent actions of the same class may be segmented out of an image sequence. We cast the action tube extraction as an energy maximisa-tion problem in which configurations of region proposals in each frame are assigned a cost and the best action tubes are selected via two passes of dynamic programming. One pass associates region proposals in space and time for each action category, and another pass is used to solve for the tube's temporal extent and to enforce a smooth label sequence through the video. In addition, by taking advantage of recent work on action foreground-background seg-mentation, we are able to associate each tube with class-specific segmentations. 
We demonstrate the performance of our algorithm on the challenging LIRIS-HARL dataset and achieve a new state-of-the-art result which is 14.3 times better than previous methods.","internal_url":"https://www.academia.edu/37182805/Spatio_temporal_Human_Action_Localisation_and_Instance_Segmentation_in_Temporally_Untrimmed_Videos","translated_internal_url":"","created_at":"2018-08-05T05:38:40.206-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":366407,"coauthors_can_edit":true,"document_type":"draft","co_author_tags":[{"id":31768679,"work_id":37182805,"tagging_user_id":366407,"tagged_user_id":51192593,"co_author_invite_id":null,"email":"s***4@brookes.ac.uk","display_order":1,"name":"Suman Saha","title":"Spatio-temporal Human Action Localisation and Instance Segmentation in Temporally Untrimmed Videos"},{"id":31768680,"work_id":37182805,"tagging_user_id":366407,"tagged_user_id":4032373,"co_author_invite_id":null,"email":"g***4@gmail.com","display_order":2,"name":"Gurkirt Singh","title":"Spatio-temporal Human Action Localisation and Instance Segmentation in Temporally Untrimmed Videos"},{"id":31768681,"work_id":37182805,"tagging_user_id":366407,"tagged_user_id":35748463,"co_author_invite_id":null,"email":"m***a@eng.ox.ac.uk","display_order":3,"name":"Michael Sapienza","title":"Spatio-temporal Human Action Localisation and Instance Segmentation in Temporally Untrimmed Videos"},{"id":31768682,"work_id":37182805,"tagging_user_id":366407,"tagged_user_id":312333,"co_author_invite_id":null,"email":"p***r@hotmail.com","affiliation":"Oxford Brookes University","display_order":4,"name":"philip torr","title":"Spatio-temporal Human Action Localisation and Instance Segmentation in Temporally Untrimmed Videos"},{"id":31768683,"work_id":37182805,"tagging_user_id":366407,"tagged_user_id":null,"co_author_invite_id":5034152,"email":"f***n@gmail.com","display_order":5,"name":"Fabio Cuzzolin","title":"Spatio-temporal Human Action Localisation and Instance Segmentation in Temporally Untrimmed Videos"}],"downloadable_attachments":[{"id":57134209,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/57134209/thumbnails/1.jpg","file_name":"1707.07213.pdf","download_url":"https://www.academia.edu/attachments/57134209/download_file","bulk_download_file_name":"Spatio_temporal_Human_Action_Localisatio.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/57134209/1707.07213-libre.pdf?1533473655=\u0026response-content-disposition=attachment%3B+filename%3DSpatio_temporal_Human_Action_Localisatio.pdf\u0026Expires=1743344606\u0026Signature=c1cDMOWQZiHr8BwFpzFRCgqlU5gPCveIAFuDcAh~bide68ickw7tJXAPlVgxWeL4Q52bVk~LTTlpZ5oah18ldR4LjcGw8vCtdLRxs88tDy4DOVRFXJySKGPnwGDeVneL2RQw3vC2ITfGYguOghdC2rmeecRtW~~4BgxU-gWM7iPNIzsmpdiiuIM7QulhyRL6c5ExdpmvzY-8NL-BOZpYh6WYDW~N0MHDA2G8sqMaeUujv8Ox90~uUm5zpRao6yJ9q4APRBZiDI5SahqxmCXOce3g7t4KtKWYpM27WJDVmKS-gOHMZXwgYrmrqqYs4gy7NE4Zva~RsN4BU-ta6aSHiw__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"slug":"Spatio_temporal_Human_Action_Localisation_and_Instance_Segmentation_in_Temporally_Untrimmed_Videos","translated_slug":"","page_count":10,"language":"en","content_type":"Work","summary":"Current state-of-the-art human action recognition is fo-cused on the classification of temporally trimmed videos in which only one action occurs per frame. 
In this work we address the problem of action localisation and instance segmentation in which multiple concurrent actions of the same class may be segmented out of an image sequence. We cast the action tube extraction as an energy maximisa-tion problem in which configurations of region proposals in each frame are assigned a cost and the best action tubes are selected via two passes of dynamic programming. One pass associates region proposals in space and time for each action category, and another pass is used to solve for the tube's temporal extent and to enforce a smooth label sequence through the video. In addition, by taking advantage of recent work on action foreground-background seg-mentation, we are able to associate each tube with class-specific segmentations. We demonstrate the performance of our algorithm on the challenging LIRIS-HARL dataset and achieve a new state-of-the-art result which is 14.3 times better than previous methods.","owner":{"id":366407,"first_name":"Fabio","middle_initials":null,"last_name":"Cuzzolin","page_name":"FabioCuzzolin","domain_name":"oxfordbrookes","created_at":"2011-03-17T00:43:31.195-07:00","display_name":"Fabio Cuzzolin","url":"https://oxfordbrookes.academia.edu/FabioCuzzolin"},"attachments":[{"id":57134209,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/57134209/thumbnails/1.jpg","file_name":"1707.07213.pdf","download_url":"https://www.academia.edu/attachments/57134209/download_file","bulk_download_file_name":"Spatio_temporal_Human_Action_Localisatio.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/57134209/1707.07213-libre.pdf?1533473655=\u0026response-content-disposition=attachment%3B+filename%3DSpatio_temporal_Human_Action_Localisatio.pdf\u0026Expires=1743344606\u0026Signature=c1cDMOWQZiHr8BwFpzFRCgqlU5gPCveIAFuDcAh~bide68ickw7tJXAPlVgxWeL4Q52bVk~LTTlpZ5oah18ldR4LjcGw8vCtdLRxs88tDy4DOVRFXJySKGPnwGDeVneL2RQw3vC2ITfGYguOghdC2rmeecRtW~~4BgxU-gWM7iPNIzsmpdiiuIM7QulhyRL6c5ExdpmvzY-8NL-BOZpYh6WYDW~N0MHDA2G8sqMaeUujv8Ox90~uUm5zpRao6yJ9q4APRBZiDI5SahqxmCXOce3g7t4KtKWYpM27WJDVmKS-gOHMZXwgYrmrqqYs4gy7NE4Zva~RsN4BU-ta6aSHiw__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"research_interests":[{"id":854,"name":"Computer Vision","url":"https://www.academia.edu/Documents/in/Computer_Vision"},{"id":2008,"name":"Machine Learning","url":"https://www.academia.edu/Documents/in/Machine_Learning"},{"id":4095,"name":"Classification (Machine Learning)","url":"https://www.academia.edu/Documents/in/Classification_Machine_Learning_"},{"id":7728,"name":"Object Recognition (Computer Vision)","url":"https://www.academia.edu/Documents/in/Object_Recognition_Computer_Vision_"},{"id":10005,"name":"Applications of Machine Learning","url":"https://www.academia.edu/Documents/in/Applications_of_Machine_Learning"},{"id":14417,"name":"Machine Vision","url":"https://www.academia.edu/Documents/in/Machine_Vision"},{"id":81182,"name":"Deep Learning","url":"https://www.academia.edu/Documents/in/Deep_Learning"},{"id":90270,"name":"Action Recognition","url":"https://www.academia.edu/Documents/in/Action_Recognition"},{"id":143038,"name":"Machine Learning and Pattern Recognition","url":"https://www.academia.edu/Documents/in/Machine_Learning_and_Pattern_Recognition"},{"id":315678,"name":"Human Action Recognition","url":"https://www.academia.edu/Documents/in/Human_Action_Recognition"}],"urls":[]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") 
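The two-pass dynamic programming outlined above can be illustrated with a generic Viterbi-style linking pass: pick one region proposal per frame so as to maximise the sum of detection scores plus a temporal-overlap reward. This is a sketch of proposal linking under assumed score and overlap terms, not the authors' exact energy function.

# Generic Viterbi-style sketch of the first dynamic-programming pass:
# link one region proposal per frame into an action tube, maximising
# (class score + lam * temporal IoU with the previous frame's proposal).

def iou(a, b):
    ix1, iy1 = max(a[0], b[0]), max(a[1], b[1])
    ix2, iy2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0.0, ix2 - ix1) * max(0.0, iy2 - iy1)
    ua = (a[2]-a[0])*(a[3]-a[1]) + (b[2]-b[0])*(b[3]-b[1]) - inter
    return inter / ua if ua > 0 else 0.0

def link_tube(frames, lam=1.0):
    """frames: list of per-frame lists of (box, score).
    Returns the index of the chosen proposal in each frame."""
    best = [s for _, s in frames[0]]   # best path value ending at each proposal
    back = []                          # backpointers, one list per frame t >= 1
    for t in range(1, len(frames)):
        cur, ptr = [], []
        for box, score in frames[t]:
            cands = [best[j] + lam * iou(frames[t-1][j][0], box)
                     for j in range(len(frames[t-1]))]
            j = max(range(len(cands)), key=cands.__getitem__)
            cur.append(score + cands[j]); ptr.append(j)
        best, back = cur, back + [ptr]
    path = [max(range(len(best)), key=best.__getitem__)]
    for ptr in reversed(back):         # trace the best path backwards
        path.append(ptr[path[-1]])
    return path[::-1]

# Two frames, two proposals each; the overlapping high-score pair is linked.
print(link_tube([[((0, 0, 10, 10), .9), ((50, 50, 60, 60), .2)],
                 [((1, 1, 11, 11), .8), ((49, 49, 61, 61), .3)]]))  # -> [0, 0]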
Research paper: TraMNet - Transition Matrix Network for Efficient Action Tube Proposals
by Gurkirt Singh, Suman Saha, and Fabio Cuzzolin
https://www.academia.edu/37182750/TraMNet_Transition_Matrix_Network_for_Efficient_Action_Tube_Proposals

Abstract: Current state-of-the-art methods solve spatio-temporal action localisation by extending 2D anchors to 3D-cuboid proposals on stacks of frames, to generate sets of temporally connected bounding boxes called action micro-tubes. However, they fail to consider that the underlying anchor proposal hypotheses should also move (transition) from frame to frame, as the actor or the camera does. Assuming we evaluate n 2D anchors in each frame, the number of possible transitions from each 2D anchor to the next, over a sequence of f consecutive frames, is of the order of O(n^f): expensive even for small values of f. To avoid this problem we introduce a Transition-Matrix-based Network (TraMNet), which relies on computing transition probabilities between anchor proposals while maximising their overlap with ground-truth bounding boxes across frames, and enforcing sparsity via a transition threshold. As the resulting transition matrix is sparse and stochastic, this reduces the proposal hypothesis search space from O(n^f) to the cardinality of the thresholded matrix. At training time, transitions are specific to cell locations of the feature maps, so that a sparse (efficient) transition matrix is used to train the network. At test time, a denser transition matrix can be obtained either by decreasing the threshold or by adding to it all the relative transitions originating from any cell location, allowing the network to handle transitions in the test data that might not have been present in the training data, and making detection translation-invariant. Finally, we show that our network is able to handle sparse annotations such as those available in the DALY dataset, while allowing for both dense (accurate) and sparse (efficient) evaluation within a single model. We report extensive experiments on the DALY, UCF101-24 and Transformed-UCF101-24 datasets to support our claims.

Download: https://www.academia.edu/attachments/57134144/download_file (1808.00297.pdf, 18 pages)
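A rough way to picture the transition matrix described above: count how often ground-truth tracks move from anchor i in one frame to anchor j in the next, normalise each row into probabilities, and zero out entries below a threshold. The grid size, counting scheme and threshold value below are illustrative assumptions, not the paper's exact construction.

import numpy as np

# Sketch of the transition-matrix idea: estimate frame-to-frame anchor
# transition probabilities from ground-truth tracks, then threshold them
# into a sparse, row-stochastic matrix.
n_anchors = 4
counts = np.zeros((n_anchors, n_anchors))

# Hypothetical per-frame anchor indices of two ground-truth tracks.
tracks = [[0, 0, 1, 1, 2], [3, 3, 3, 2, 2]]
for track in tracks:
    for i, j in zip(track, track[1:]):
        counts[i, j] += 1

probs = counts / counts.sum(axis=1, keepdims=True)   # row-stochastic
sparse = np.where(probs >= 0.25, probs, 0.0)         # transition threshold

# The surviving entries are the only anchor-to-anchor transitions that
# need evaluating, instead of all n_anchors**f possible paths.
print(int((sparse > 0).sum()), "of", n_anchors**2, "transitions kept")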
Recognition","url":"https://www.academia.edu/Documents/in/Action_Recognition"},{"id":141502,"name":"Inteligencia artificial","url":"https://www.academia.edu/Documents/in/Inteligencia_artificial-1"},{"id":143038,"name":"Machine Learning and Pattern Recognition","url":"https://www.academia.edu/Documents/in/Machine_Learning_and_Pattern_Recognition"},{"id":315678,"name":"Human Action Recognition","url":"https://www.academia.edu/Documents/in/Human_Action_Recognition"}],"urls":[]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") if (false) { Aedu.setUpFigureCarousel('profile-work-37182750-figures'); } }); </script> <div class="js-work-strip profile--work_container" data-work-id="37182742"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/37182742/Belief_likelihood_function_for_generalised_logistic_regression"><img alt="Research paper thumbnail of Belief likelihood function for generalised logistic regression" class="work-thumbnail" src="https://attachments.academia-assets.com/57134135/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/37182742/Belief_likelihood_function_for_generalised_logistic_regression">Belief likelihood function for generalised logistic regression</a></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">The notion of belief likelihood function of repeated trials is introduced, whenever the uncertain...</span><a class="js-work-more-abstract" data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">The notion of belief likelihood function of repeated trials is introduced, whenever the uncertainty for individual trials is encoded by a belief measure (a finite random set). This gen-eralises the traditional likelihood function, and provides a natural setting for belief inference from statistical data. Factorisation results are proven for the case in which conjunctive or disjunctive combination are employed, leading to analytical expressions for the lower and upper likelihoods of 'sharp' samples in the case of Bernoulli trials, and to the formulation of a generalised logistic regression framework.</span></div><div class="wp-workCard_item"><div class="carousel-container carousel-container--sm" id="profile-work-37182742-figures"><div class="prev-slide-container js-prev-button-container"><button aria-label="Previous" class="carousel-navigation-button js-profile-work-37182742-figures-prev"><span class="material-symbols-outlined" style="font-size: 24px" translate="no">arrow_back_ios</span></button></div><div class="slides-container js-slides-container"><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/37882808/figure-1-let-us-first-analyse-the-case-we-seek-the-demp"><img alt="Let us first analyse the case n = 2. We seek the Demp- ster’s sum Belx, © Belx,, where X, = X2 = {T, F}. Figure}l|is a diagram of all the intersections of focal ele- ments of the two input BF on X, x Xg. There are 9 = 3? 
Figure 1: Graphical representation of Dempster’s com- bination Belx, 6 Belx, on Xi x Xo. " class="figure-slide-image" src="https://figures.academia-assets.com/57134135/figure_001.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/37882810/figure-2-graphical-representation-of-the-disjunctive"><img alt="Figure 2: Graphical representation of the disjunctive combination Belx, OQ Belx, on X1 x Xe. " class="figure-slide-image" src="https://figures.academia-assets.com/57134135/figure_002.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/37882813/figure-3-lower-top-and-upper-bottom-likelihood-functions"><img alt="Figure 3: Lower (top) and upper (bottom) likelihood functions plotted over the space of belief functions on X = {T, F'}, parameterised by p = m(T) (X axis) and q = m(F) (¥ axis), for the case of k = 6 successes over n = 10 trials. " class="figure-slide-image" src="https://figures.academia-assets.com/57134135/figure_003.jpg" /></a></figure></div><div class="next-slide-container js-next-button-container"><button aria-label="Next" class="carousel-navigation-button js-profile-work-37182742-figures-next"><span class="material-symbols-outlined" style="font-size: 24px" translate="no">arrow_forward_ios</span></button></div></div></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="3bcf7ce5ccd42cf71f5fd13e5b399394" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":57134135,"asset_id":37182742,"asset_type":"Work","button_location":"profile"}" href="https://www.academia.edu/attachments/57134135/download_file?s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="37182742"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="37182742"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 37182742; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=37182742]").text(description); $(".js-view-count[data-work-id=37182742]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 37182742; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='37182742']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 
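The count 9 = 3^2 in the Figure 1 caption can be reproduced directly: a belief function on {T, F} has at most three focal elements ({T}, {F} and {T, F}), so combining two belief functions pairs up 3 x 3 focal elements. The sketch below enumerates those intersections for a Dempster-style conjunctive combination on a single frame of discernment (the paper's figure draws the analogous picture on the product space X1 x X2); the mass values are assumed for illustration.

from itertools import product

# Enumerate the 3 x 3 = 9 focal-element intersections behind Dempster's
# combination of two belief functions on {T, F}. Masses are illustrative.
T, F = frozenset({"T"}), frozenset({"F"})
TF = frozenset({"T", "F"})

m1 = {T: 0.5, F: 0.3, TF: 0.2}   # mass assignment of Bel_X1 (assumed)
m2 = {T: 0.6, F: 0.1, TF: 0.3}   # mass assignment of Bel_X2 (assumed)

combined = {}
for (a, wa), (b, wb) in product(m1.items(), m2.items()):
    inter = a & b                 # focal elements intersect pairwise: 9 cases
    combined[inter] = combined.get(inter, 0.0) + wa * wb

conflict = combined.pop(frozenset(), 0.0)            # mass on empty set
dempster = {s: w / (1.0 - conflict) for s, w in combined.items()}
print(len(m1) * len(m2), "intersections; conflict mass =", round(conflict, 3))
print("combined m({T}) =", round(dempster[T], 3))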
})(["https://a.academia-assets.com/assets/wow_profile-a9bf3a2bc8c89fa2a77156577594264ee8a0f214d74241bc0fcd3f69f8d107ac.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (true){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "3bcf7ce5ccd42cf71f5fd13e5b399394" } } $('.js-work-strip[data-work-id=37182742]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":37182742,"title":"Belief likelihood function for generalised logistic regression","translated_title":"","metadata":{"abstract":"The notion of belief likelihood function of repeated trials is introduced, whenever the uncertainty for individual trials is encoded by a belief measure (a finite random set). This gen-eralises the traditional likelihood function, and provides a natural setting for belief inference from statistical data. Factorisation results are proven for the case in which conjunctive or disjunctive combination are employed, leading to analytical expressions for the lower and upper likelihoods of 'sharp' samples in the case of Bernoulli trials, and to the formulation of a generalised logistic regression framework."},"translated_abstract":"The notion of belief likelihood function of repeated trials is introduced, whenever the uncertainty for individual trials is encoded by a belief measure (a finite random set). This gen-eralises the traditional likelihood function, and provides a natural setting for belief inference from statistical data. Factorisation results are proven for the case in which conjunctive or disjunctive combination are employed, leading to analytical expressions for the lower and upper likelihoods of 'sharp' samples in the case of Bernoulli trials, and to the formulation of a generalised logistic regression framework.","internal_url":"https://www.academia.edu/37182742/Belief_likelihood_function_for_generalised_logistic_regression","translated_internal_url":"","created_at":"2018-08-05T05:16:57.448-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":366407,"coauthors_can_edit":true,"document_type":"draft","co_author_tags":[],"downloadable_attachments":[{"id":57134135,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/57134135/thumbnails/1.jpg","file_name":"view.pdf","download_url":"https://www.academia.edu/attachments/57134135/download_file","bulk_download_file_name":"Belief_likelihood_function_for_generalis.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/57134135/view-libre.pdf?1533472903=\u0026response-content-disposition=attachment%3B+filename%3DBelief_likelihood_function_for_generalis.pdf\u0026Expires=1743344606\u0026Signature=MBpklSsajCwHtYjzupHgB7pCNly~OtKw~8Ug~MbOt7wwaHBxhtmQi2d7yu3zGo2YRKBo6uZzTmZ~QjhjZ~HtUdU4dkTNJm92HR3t8Ag2~1~LUe4bKcwM76c-T8oxMhpNA7rGil0ynQ6KkHVkx83MxVae-d8v9Kq0sMZ44zMFjsI-IjFZguoY5-aMuadBQiADSwY2UKHDcJ~2QM-Tk860aqyFnXn13MfCx02XnciopA29Jeq9mVWjbkJP3TkY1Xv0EgRsoBnfTj0sDJBSF1D4aQ0pqTLykuOLOO5JsY72Sg1oA75ff~-DssmQig~luRwVq3pjrDDzm5JydkImGGoC0Q__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"slug":"Belief_likelihood_function_for_generalised_logistic_regression","translated_slug":"","page_count":10,"language":"en","content_type":"Work","summary":"The notion of belief likelihood 
function of repeated trials is introduced, whenever the uncertainty for individual trials is encoded by a belief measure (a finite random set). This gen-eralises the traditional likelihood function, and provides a natural setting for belief inference from statistical data. Factorisation results are proven for the case in which conjunctive or disjunctive combination are employed, leading to analytical expressions for the lower and upper likelihoods of 'sharp' samples in the case of Bernoulli trials, and to the formulation of a generalised logistic regression framework.","owner":{"id":366407,"first_name":"Fabio","middle_initials":null,"last_name":"Cuzzolin","page_name":"FabioCuzzolin","domain_name":"oxfordbrookes","created_at":"2011-03-17T00:43:31.195-07:00","display_name":"Fabio Cuzzolin","url":"https://oxfordbrookes.academia.edu/FabioCuzzolin"},"attachments":[{"id":57134135,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/57134135/thumbnails/1.jpg","file_name":"view.pdf","download_url":"https://www.academia.edu/attachments/57134135/download_file","bulk_download_file_name":"Belief_likelihood_function_for_generalis.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/57134135/view-libre.pdf?1533472903=\u0026response-content-disposition=attachment%3B+filename%3DBelief_likelihood_function_for_generalis.pdf\u0026Expires=1743344606\u0026Signature=MBpklSsajCwHtYjzupHgB7pCNly~OtKw~8Ug~MbOt7wwaHBxhtmQi2d7yu3zGo2YRKBo6uZzTmZ~QjhjZ~HtUdU4dkTNJm92HR3t8Ag2~1~LUe4bKcwM76c-T8oxMhpNA7rGil0ynQ6KkHVkx83MxVae-d8v9Kq0sMZ44zMFjsI-IjFZguoY5-aMuadBQiADSwY2UKHDcJ~2QM-Tk860aqyFnXn13MfCx02XnciopA29Jeq9mVWjbkJP3TkY1Xv0EgRsoBnfTj0sDJBSF1D4aQ0pqTLykuOLOO5JsY72Sg1oA75ff~-DssmQig~luRwVq3pjrDDzm5JydkImGGoC0Q__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"research_interests":[{"id":344,"name":"Probability Theory","url":"https://www.academia.edu/Documents/in/Probability_Theory"},{"id":465,"name":"Artificial Intelligence","url":"https://www.academia.edu/Documents/in/Artificial_Intelligence"},{"id":892,"name":"Statistics","url":"https://www.academia.edu/Documents/in/Statistics"},{"id":4060,"name":"Applied Statistics","url":"https://www.academia.edu/Documents/in/Applied_Statistics"},{"id":6404,"name":"Reasoning about Uncertainty","url":"https://www.academia.edu/Documents/in/Reasoning_about_Uncertainty"},{"id":16097,"name":"Decision Making Under Uncertainty","url":"https://www.academia.edu/Documents/in/Decision_Making_Under_Uncertainty"},{"id":22613,"name":"Probability and statistics","url":"https://www.academia.edu/Documents/in/Probability_and_statistics"},{"id":31412,"name":"Probability and Mathematical Statistics","url":"https://www.academia.edu/Documents/in/Probability_and_Mathematical_Statistics"},{"id":33069,"name":"Probability","url":"https://www.academia.edu/Documents/in/Probability"},{"id":41239,"name":"Bayesian statistics \u0026 modelling","url":"https://www.academia.edu/Documents/in/Bayesian_statistics_and_modelling"},{"id":61603,"name":"Uncertainty","url":"https://www.academia.edu/Documents/in/Uncertainty"},{"id":87364,"name":"Maximum Likelihood","url":"https://www.academia.edu/Documents/in/Maximum_Likelihood"},{"id":94223,"name":"Imprecise Probability","url":"https://www.academia.edu/Documents/in/Imprecise_Probability"},{"id":94225,"name":"Belief Functions","url":"https://www.academia.edu/Documents/in/Belief_Functions"},{"id":228350,"name":"Maximum Likelihood 
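To make the Bernoulli case in the abstract (and in Figure 3) concrete, one plausible reading of the factorisation result is the following, under the assumption that the belief likelihood factorises over trials via conjunctive combination: with per-trial masses p = m(T) and q = m(F), so that Pl(T) = 1 - q and Pl(F) = 1 - p, a sharp sample with k successes in n trials has lower and upper likelihoods

\[
\underline{L}(\mathrm{Bel}\mid x_1,\dots,x_n) \;=\; \prod_{i=1}^{n}\mathrm{Bel}(\{x_i\}) \;=\; p^{k}\,q^{\,n-k},
\qquad
\overline{L}(\mathrm{Bel}\mid x_1,\dots,x_n) \;=\; \prod_{i=1}^{n}\mathrm{Pl}(\{x_i\}) \;=\; (1-q)^{k}\,(1-p)^{\,n-k}.
\]

This is an illustration consistent with Figure 3's parameterisation over (p, q), not a restatement of the paper's exact theorem.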
Estimation","url":"https://www.academia.edu/Documents/in/Maximum_Likelihood_Estimation"},{"id":388873,"name":"Mathematics and Statistics","url":"https://www.academia.edu/Documents/in/Mathematics_and_Statistics"},{"id":492766,"name":"Random Sets","url":"https://www.academia.edu/Documents/in/Random_Sets"},{"id":571797,"name":"Introduction to Probability","url":"https://www.academia.edu/Documents/in/Introduction_to_Probability"}],"urls":[]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") if (true) { Aedu.setUpFigureCarousel('profile-work-37182742-figures'); } }); </script> <div class="js-work-strip profile--work_container" data-work-id="37182736"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/37182736/Action_Detection_from_a_Robot_Car_Perspective"><img alt="Research paper thumbnail of Action Detection from a Robot-Car Perspective" class="work-thumbnail" src="https://attachments.academia-assets.com/57134128/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/37182736/Action_Detection_from_a_Robot_Car_Perspective">Action Detection from a Robot-Car Perspective</a></div><div class="wp-workCard_item wp-workCard--coauthors"><span>by </span><span><a class="" data-click-track="profile-work-strip-authors" href="https://oxfordbrookes.academia.edu/FabioCuzzolin">Fabio Cuzzolin</a>, <a class="" data-click-track="profile-work-strip-authors" href="https://independent.academia.edu/ValentinaFontana10">Valentina Fontana</a>, and <a class="" data-click-track="profile-work-strip-authors" href="https://independent.academia.edu/StephenAkrigg">Stephen Akrigg</a></span></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">We present the new Road Event and Activity Detection (READ) dataset, designed and created from an...</span><a class="js-work-more-abstract" data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">We present the new Road Event and Activity Detection (READ) dataset, designed and created from an autonomous vehicle perspective to take action detection challenges to autonomous driving. 
READ will give scholars in computer vision, smart cars and machine learning at large the opportunity to conduct research into exciting new problems such as understanding complex (road) activities, discerning the behaviour of sentient agents, and predicting both the label and the location of future actions and events, with the final goal of supporting autonomous decision making.</span></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="52db53463e86b6741a9cef1b5c88042d" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":57134128,"asset_id":37182736,"asset_type":"Work","button_location":"profile"}" href="https://www.academia.edu/attachments/57134128/download_file?s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="37182736"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="37182736"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 37182736; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=37182736]").text(description); $(".js-view-count[data-work-id=37182736]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 37182736; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='37182736']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-a9bf3a2bc8c89fa2a77156577594264ee8a0f214d74241bc0fcd3f69f8d107ac.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (true){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "52db53463e86b6741a9cef1b5c88042d" } } $('.js-work-strip[data-work-id=37182736]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":37182736,"title":"Action Detection from a Robot-Car Perspective","translated_title":"","metadata":{"abstract":"We present the new Road Event and Activity Detection (READ) dataset, designed and created from an autonomous vehicle perspective to take action detection challenges to autonomous driving. 
READ will give scholars in computer vision, smart cars and machine learning at large the opportunity to conduct research into exciting new problems such as understanding complex (road) activities, discerning the behaviour of sentient agents, and predicting both the label and the location of future actions and events, with the final goal of supporting autonomous decision making.","ai_title_tag":"READ: Action Detection for Autonomous Vehicles"},"translated_abstract":"We present the new Road Event and Activity Detection (READ) dataset, designed and created from an autonomous vehicle perspective to take action detection challenges to autonomous driving. READ will give scholars in computer vision, smart cars and machine learning at large the opportunity to conduct research into exciting new problems such as understanding complex (road) activities, discerning the behaviour of sentient agents, and predicting both the label and the location of future actions and events, with the final goal of supporting autonomous decision making.","internal_url":"https://www.academia.edu/37182736/Action_Detection_from_a_Robot_Car_Perspective","translated_internal_url":"","created_at":"2018-08-05T05:12:35.511-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":366407,"coauthors_can_edit":true,"document_type":"draft","co_author_tags":[{"id":31768590,"work_id":37182736,"tagging_user_id":366407,"tagged_user_id":88247893,"co_author_invite_id":6730998,"email":"v***a@studenti.unina.it","display_order":1,"name":"Valentina Fontana","title":"Action Detection from a Robot-Car Perspective"},{"id":31768591,"work_id":37182736,"tagging_user_id":366407,"tagged_user_id":88516501,"co_author_invite_id":6730999,"email":"s***g@imsu.ox.ac.uk","display_order":2,"name":"Stephen Akrigg","title":"Action Detection from a Robot-Car Perspective"},{"id":31768592,"work_id":37182736,"tagging_user_id":366407,"tagged_user_id":4032373,"co_author_invite_id":null,"email":"g***4@gmail.com","display_order":3,"name":"Gurkirt Singh","title":"Action Detection from a Robot-Car Perspective"},{"id":31768593,"work_id":37182736,"tagging_user_id":366407,"tagged_user_id":51192593,"co_author_invite_id":null,"email":"s***4@brookes.ac.uk","display_order":4,"name":"Suman Saha","title":"Action Detection from a Robot-Car Perspective"},{"id":31768594,"work_id":37182736,"tagging_user_id":366407,"tagged_user_id":null,"co_author_invite_id":5034152,"email":"f***n@gmail.com","display_order":5,"name":"Fabio Cuzzolin","title":"Action Detection from a Robot-Car 
Perspective"}],"downloadable_attachments":[{"id":57134128,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/57134128/thumbnails/1.jpg","file_name":"1807.11332.pdf","download_url":"https://www.academia.edu/attachments/57134128/download_file","bulk_download_file_name":"Action_Detection_from_a_Robot_Car_Perspe.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/57134128/1807.11332-libre.pdf?1533472912=\u0026response-content-disposition=attachment%3B+filename%3DAction_Detection_from_a_Robot_Car_Perspe.pdf\u0026Expires=1743344607\u0026Signature=g32ZjKUjZ8C8B~0Q4QmP2mTkhV~iq32vzHTkTN0iPSVLjUKjo7ceVy5VGyrsUIfa4qlYvJPRxtNdNJQ2A7SiMooteRELMgr-FEy7mKbT1yTiElnkx7AbtU~FnkeY6lAGmi8rjEDv2eGuKJG1bzez0C08jneVJen9HlZoeAuPnGcvuump9Nq2mmoh4l9ACvWtaOAKFzEoQNV4zIMB7T6KexPmB3MD58VWvChcltr-8GBwcyoZwDC-6uvxYPXIsdrD8hNNl1zK4GAbIA3XMQc0SOVwCnd67CYeTbLDcP7IAOE56jYWbx1yZtmnIotEiijIxCnsTuBAJWZILoEhUKQtxQ__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"slug":"Action_Detection_from_a_Robot_Car_Perspective","translated_slug":"","page_count":8,"language":"en","content_type":"Work","summary":"We present the new Road Event and Activity Detection (READ) dataset, designed and created from an autonomous vehicle perspective to take action detection challenges to autonomous driving. READ will give scholars in computer vision, smart cars and machine learning at large the opportunity to conduct research into exciting new problems such as understanding complex (road) activities, discerning the behaviour of sentient agents, and predicting both the label and the location of future actions and events, with the final goal of supporting autonomous decision making.","owner":{"id":366407,"first_name":"Fabio","middle_initials":null,"last_name":"Cuzzolin","page_name":"FabioCuzzolin","domain_name":"oxfordbrookes","created_at":"2011-03-17T00:43:31.195-07:00","display_name":"Fabio Cuzzolin","url":"https://oxfordbrookes.academia.edu/FabioCuzzolin"},"attachments":[{"id":57134128,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/57134128/thumbnails/1.jpg","file_name":"1807.11332.pdf","download_url":"https://www.academia.edu/attachments/57134128/download_file","bulk_download_file_name":"Action_Detection_from_a_Robot_Car_Perspe.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/57134128/1807.11332-libre.pdf?1533472912=\u0026response-content-disposition=attachment%3B+filename%3DAction_Detection_from_a_Robot_Car_Perspe.pdf\u0026Expires=1743344607\u0026Signature=g32ZjKUjZ8C8B~0Q4QmP2mTkhV~iq32vzHTkTN0iPSVLjUKjo7ceVy5VGyrsUIfa4qlYvJPRxtNdNJQ2A7SiMooteRELMgr-FEy7mKbT1yTiElnkx7AbtU~FnkeY6lAGmi8rjEDv2eGuKJG1bzez0C08jneVJen9HlZoeAuPnGcvuump9Nq2mmoh4l9ACvWtaOAKFzEoQNV4zIMB7T6KexPmB3MD58VWvChcltr-8GBwcyoZwDC-6uvxYPXIsdrD8hNNl1zK4GAbIA3XMQc0SOVwCnd67CYeTbLDcP7IAOE56jYWbx1yZtmnIotEiijIxCnsTuBAJWZILoEhUKQtxQ__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"research_interests":[{"id":77,"name":"Robotics","url":"https://www.academia.edu/Documents/in/Robotics"},{"id":854,"name":"Computer Vision","url":"https://www.academia.edu/Documents/in/Computer_Vision"},{"id":2008,"name":"Machine Learning","url":"https://www.academia.edu/Documents/in/Machine_Learning"},{"id":2043,"name":"Mobile Robotics","url":"https://www.academia.edu/Documents/in/Mobile_Robotics"},{"id":3413,"name":"Robot Vision","url":"https://www.academia.edu/Documents/in/Robot_Vision"},{"id":4095,"name":"Classification (Machine 
Learning)","url":"https://www.academia.edu/Documents/in/Classification_Machine_Learning_"},{"id":4892,"name":"Autonomous Robotics","url":"https://www.academia.edu/Documents/in/Autonomous_Robotics"},{"id":7728,"name":"Object Recognition (Computer Vision)","url":"https://www.academia.edu/Documents/in/Object_Recognition_Computer_Vision_"},{"id":10005,"name":"Applications of Machine Learning","url":"https://www.academia.edu/Documents/in/Applications_of_Machine_Learning"},{"id":14417,"name":"Machine Vision","url":"https://www.academia.edu/Documents/in/Machine_Vision"},{"id":81182,"name":"Deep Learning","url":"https://www.academia.edu/Documents/in/Deep_Learning"},{"id":84688,"name":"Autonomous Vehicles","url":"https://www.academia.edu/Documents/in/Autonomous_Vehicles"},{"id":90270,"name":"Action Recognition","url":"https://www.academia.edu/Documents/in/Action_Recognition"},{"id":111217,"name":"Scene Understanding","url":"https://www.academia.edu/Documents/in/Scene_Understanding"},{"id":113706,"name":"Autonomous Mobile Robots","url":"https://www.academia.edu/Documents/in/Autonomous_Mobile_Robots"},{"id":143038,"name":"Machine Learning and Pattern Recognition","url":"https://www.academia.edu/Documents/in/Machine_Learning_and_Pattern_Recognition"},{"id":204921,"name":"Autonomous driving","url":"https://www.academia.edu/Documents/in/Autonomous_driving"},{"id":327599,"name":"Computer Vision, Behaviour Modelling, Deep Learning","url":"https://www.academia.edu/Documents/in/Computer_Vision_Behaviour_Modelling_Deep_Learning"},{"id":559503,"name":"Machine Learning Big Data","url":"https://www.academia.edu/Documents/in/Machine_Learning_Big_Data"}],"urls":[]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") if (false) { Aedu.setUpFigureCarousel('profile-work-37182736-figures'); } }); </script> <div class="js-work-strip profile--work_container" data-work-id="34486828"><div class="profile--work_thumbnail hidden-xs"><a class="js-work-strip-work-link" data-click-track="profile-work-strip-thumbnail" href="https://www.academia.edu/34486828/Incremental_Tube_Construction_for_Human_Action_Detection"><img alt="Research paper thumbnail of Incremental Tube Construction for Human Action Detection" class="work-thumbnail" src="https://attachments.academia-assets.com/54355230/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/34486828/Incremental_Tube_Construction_for_Human_Action_Detection">Incremental Tube Construction for Human Action Detection</a></div><div class="wp-workCard_item wp-workCard--coauthors"><span>by </span><span><a class="" data-click-track="profile-work-strip-authors" href="https://oxfordbrookes.academia.edu/FabioCuzzolin">Fabio Cuzzolin</a>, <a class="" data-click-track="profile-work-strip-authors" href="https://independent.academia.edu/MichaelSapienza">Michael Sapienza</a>, <a class="" data-click-track="profile-work-strip-authors" href="https://independent.academia.edu/GurkirtSingh">Gurkirt Singh</a>, and <a class="" data-click-track="profile-work-strip-authors" href="https://independent.academia.edu/SumanSaha35">Suman Saha</a></span></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">Current state-of-the-art action detection systems are tailored for offline batch-processing 
Current state-of-the-art action detection systems are tailored for offline batch-processing applications. However, for online applications like human-robot interaction, current systems fall short, either because they only detect one action per video, or because they assume that the entire video is available ahead of time. In this work, we introduce a real-time, online joint-labelling and association algorithm for action detection that can incrementally construct space-time action tubes on the most challenging action videos, in which different action categories occur concurrently. In contrast to previous methods, we solve the detection-window association and action-labelling problems jointly in a single pass. We demonstrate superior online association accuracy and speed (2.2 ms per frame) compared to the current state-of-the-art offline systems. We further demonstrate that the entire action detection pipeline can easily be made to work effectively in real time using our action tube construction algorithm.

PDF: 1704.01358.pdf (10 pages)
Research interests: Computer Vision; Machine Learning; Classification (Machine Learning); Pattern Recognition; Object Recognition (Pattern Recognition); Object Recognition (Computer Vision); Gesture Recognition; Deep Learning; Machine Learning and Pattern Recognition
URL: https://www.academia.edu/34486828/Incremental_Tube_Construction_for_Human_Action_Detection
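The abstract describes the association step only at a high level, so the following is a minimal, hypothetical sketch of the general idea: per-frame detections are greedily linked to open tubes by spatial overlap and label agreement in a single online pass. The `Tube` and `update_tubes` names, the IoU and patience thresholds, and the running-mean tube score are all assumptions made for illustration; the paper's actual algorithm solves labelling and association jointly and differs in the details.

```python
# Hypothetical sketch of online action-tube construction: frame-level
# detections (box + label + score) are greedily linked to existing tubes by
# spatial overlap in one pass over the video. Thresholds and the scoring
# rule are illustrative assumptions, not the paper's exact algorithm.
from dataclasses import dataclass, field

def iou(a, b):
    """Intersection-over-union of two boxes (x1, y1, x2, y2)."""
    x1, y1 = max(a[0], b[0]), max(a[1], b[1])
    x2, y2 = min(a[2], b[2]), min(a[3], b[3])
    inter = max(0.0, x2 - x1) * max(0.0, y2 - y1)
    area = lambda r: (r[2] - r[0]) * (r[3] - r[1])
    union = area(a) + area(b) - inter
    return inter / union if union > 0 else 0.0

@dataclass
class Tube:
    label: int
    boxes: list = field(default_factory=list)  # one box per matched frame
    score: float = 0.0                         # running mean detection score
    missed: int = 0                            # consecutive unmatched frames

def update_tubes(tubes, detections, iou_thr=0.3, max_missed=5):
    """One online step: extend tubes with this frame's detections.

    `detections` is a list of (box, label, score) tuples. Each detection
    extends at most one tube; leftovers start new tubes; tubes unmatched
    for `max_missed` frames are terminated.
    """
    # Rank candidate (tube, detection) pairs by overlap, best first,
    # keeping only label-consistent pairs.
    pairs = sorted(
        ((iou(t.boxes[-1], d[0]), t, d)
         for t in tubes for d in detections if t.label == d[1]),
        key=lambda p: p[0], reverse=True)
    used_tubes, used_dets = set(), set()
    for ov, t, d in pairs:
        if ov < iou_thr or id(t) in used_tubes or id(d) in used_dets:
            continue
        t.boxes.append(d[0])
        t.score += (d[2] - t.score) / len(t.boxes)  # incremental mean
        t.missed = 0
        used_tubes.add(id(t)); used_dets.add(id(d))
    new_tubes = [Tube(label=d[1], boxes=[d[0]], score=d[2])
                 for d in detections if id(d) not in used_dets]
    for t in tubes:
        if id(t) not in used_tubes:
            t.missed += 1
    return [t for t in tubes if t.missed <= max_missed] + new_tubes
```

Calling `update_tubes` once per incoming frame keeps the whole procedure incremental: no future frames are consulted, which is what makes per-frame costs on the order of milliseconds plausible.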
href="https://www.academia.edu/27106383/Untrimmed_Video_Classification_for_Activity_Detection_submission_to_ActivityNet_Challenge"><img alt="Research paper thumbnail of Untrimmed Video Classification for Activity Detection: submission to ActivityNet Challenge" class="work-thumbnail" src="https://attachments.academia-assets.com/47357173/thumbnails/1.jpg" /></a></div><div class="wp-workCard wp-workCard_itemContainer"><div class="wp-workCard_item wp-workCard--title"><a class="js-work-strip-work-link text-gray-darker" data-click-track="profile-work-strip-title" href="https://www.academia.edu/27106383/Untrimmed_Video_Classification_for_Activity_Detection_submission_to_ActivityNet_Challenge">Untrimmed Video Classification for Activity Detection: submission to ActivityNet Challenge</a></div><div class="wp-workCard_item wp-workCard--coauthors"><span>by </span><span><a class="" data-click-track="profile-work-strip-authors" href="https://oxfordbrookes.academia.edu/FabioCuzzolin">Fabio Cuzzolin</a> and <a class="" data-click-track="profile-work-strip-authors" href="https://independent.academia.edu/GurkirtSingh">Gurkirt Singh</a></span></div><div class="wp-workCard_item"><span class="js-work-more-abstract-truncated">Current state-of-the-art human activity recognition is fo-cused on the classification of temporal...</span><a class="js-work-more-abstract" data-broccoli-component="work_strip.more_abstract" data-click-track="profile-work-strip-more-abstract" href="javascript:;"><span> more </span><span><i class="fa fa-caret-down"></i></span></a><span class="js-work-more-abstract-untruncated hidden">Current state-of-the-art human activity recognition is fo-cused on the classification of temporally trimmed videos in which only one action occurs per frame. We propose a simple, yet effective, method for the temporal detection of activities in temporally untrimmed videos with the help of untrimmed classification. Firstly, our model predicts the top k labels for each untrimmed video by analysing global video-level features. Secondly, frame-level binary classification is combined with dynamic programming to generate the temporally trimmed activity proposals. Finally, each proposal is assigned a label based on the global label, and scored with the score of the temporal activity proposal and the global score. Ultimately, we show that untrimmed video classification models can be used as stepping stone for temporal detection. Our method wins runner-up prize in Ac-tivtiyNet Detection challenge 2016.</span></div><div class="wp-workCard_item"><div class="carousel-container carousel-container--sm" id="profile-work-27106383-figures"><div class="prev-slide-container js-prev-button-container"><button aria-label="Previous" class="carousel-navigation-button js-profile-work-27106383-figures-prev"><span class="material-symbols-outlined" style="font-size: 24px" translate="no">arrow_back_ios</span></button></div><div class="slides-container js-slides-container"><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/17725442/figure-1-plot-shows-ground-truth-in-blue-binary-classifier"><img alt="Figure 1: Plot shows ground truth in blue, binary classifier score in red and piece-wise constant proposal produce by DP optimisation. Binary classifier scores are high where activity is happening, which produces well aligned segment proposal with ground truth. 
" class="figure-slide-image" src="https://figures.academia-assets.com/47357173/figure_001.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/17725448/figure-2-plot-shows-ground-truth-in-blue-binary-classifier"><img alt="Figure 2: Plot shows ground truth in blue, binary classifier score in red and piece-wise constant proposal produce by DP optimisation. We can see two proposal are mostly aligned with ground truth. " class="figure-slide-image" src="https://figures.academia-assets.com/47357173/figure_002.jpg" /></a></figure><figure class="figure-slide-container"><a href="https://www.academia.edu/figures/17725452/figure-3-plot-shows-ground-truth-in-blue-binary-classifier"><img alt="Figure 3: Plot shows ground truth in blue, binary classifier score in red and piece-wise constant proposal produce by DP optimisation. Our method completely fails to trim two instance but only produce one segment for whole video as binary classifier score are high throughout the video duration. " class="figure-slide-image" src="https://figures.academia-assets.com/47357173/figure_003.jpg" /></a></figure></div><div class="next-slide-container js-next-button-container"><button aria-label="Next" class="carousel-navigation-button js-profile-work-27106383-figures-next"><span class="material-symbols-outlined" style="font-size: 24px" translate="no">arrow_forward_ios</span></button></div></div></div><div class="wp-workCard_item wp-workCard--actions"><span class="work-strip-bookmark-button-container"></span><a id="39ba86a62467349168595772fe500cb9" class="wp-workCard--action" rel="nofollow" data-click-track="profile-work-strip-download" data-download="{"attachment_id":47357173,"asset_id":27106383,"asset_type":"Work","button_location":"profile"}" href="https://www.academia.edu/attachments/47357173/download_file?s=profile"><span><i class="fa fa-arrow-down"></i></span><span>Download</span></a><span class="wp-workCard--action visible-if-viewed-by-owner inline-block" style="display: none;"><span class="js-profile-work-strip-edit-button-wrapper profile-work-strip-edit-button-wrapper" data-work-id="27106383"><a class="js-profile-work-strip-edit-button" tabindex="0"><span><i class="fa fa-pencil"></i></span><span>Edit</span></a></span></span></div><div class="wp-workCard_item wp-workCard--stats"><span><span><span class="js-view-count view-count u-mr2x" data-work-id="27106383"><i class="fa fa-spinner fa-spin"></i></span><script>$(function () { var workId = 27106383; window.Academia.workViewCountsFetcher.queue(workId, function (count) { var description = window.$h.commaizeInt(count) + " " + window.$h.pluralize(count, 'View'); $(".js-view-count[data-work-id=27106383]").text(description); $(".js-view-count[data-work-id=27106383]").attr('title', description).tooltip(); }); });</script></span></span><span><span class="percentile-widget hidden"><span class="u-mr2x work-percentile"></span></span><script>$(function () { var workId = 27106383; window.Academia.workPercentilesFetcher.queue(workId, function (percentileText) { var container = $(".js-work-strip[data-work-id='27106383']"); container.find('.work-percentile').text(percentileText.charAt(0).toUpperCase() + percentileText.slice(1)); container.find('.percentile-widget').show(); container.find('.percentile-widget').removeClass('hidden'); }); });</script></span></div><div id="work-strip-premium-row-container"></div></div></div><script> require.config({ waitSeconds: 90 
})(["https://a.academia-assets.com/assets/wow_profile-a9bf3a2bc8c89fa2a77156577594264ee8a0f214d74241bc0fcd3f69f8d107ac.js","https://a.academia-assets.com/assets/work_edit-ad038b8c047c1a8d4fa01b402d530ff93c45fee2137a149a4a5398bc8ad67560.js"], function() { // from javascript_helper.rb var dispatcherData = {} if (true){ window.WowProfile.dispatcher = window.WowProfile.dispatcher || _.clone(Backbone.Events); dispatcherData = { dispatcher: window.WowProfile.dispatcher, downloadLinkId: "39ba86a62467349168595772fe500cb9" } } $('.js-work-strip[data-work-id=27106383]').each(function() { if (!$(this).data('initialized')) { new WowProfile.WorkStripView({ el: this, workJSON: {"id":27106383,"title":"Untrimmed Video Classification for Activity Detection: submission to ActivityNet Challenge","translated_title":"","metadata":{"abstract":"Current state-of-the-art human activity recognition is fo-cused on the classification of temporally trimmed videos in which only one action occurs per frame. We propose a simple, yet effective, method for the temporal detection of activities in temporally untrimmed videos with the help of untrimmed classification. Firstly, our model predicts the top k labels for each untrimmed video by analysing global video-level features. Secondly, frame-level binary classification is combined with dynamic programming to generate the temporally trimmed activity proposals. Finally, each proposal is assigned a label based on the global label, and scored with the score of the temporal activity proposal and the global score. Ultimately, we show that untrimmed video classification models can be used as stepping stone for temporal detection. Our method wins runner-up prize in Ac-tivtiyNet Detection challenge 2016."},"translated_abstract":"Current state-of-the-art human activity recognition is fo-cused on the classification of temporally trimmed videos in which only one action occurs per frame. We propose a simple, yet effective, method for the temporal detection of activities in temporally untrimmed videos with the help of untrimmed classification. Firstly, our model predicts the top k labels for each untrimmed video by analysing global video-level features. Secondly, frame-level binary classification is combined with dynamic programming to generate the temporally trimmed activity proposals. Finally, each proposal is assigned a label based on the global label, and scored with the score of the temporal activity proposal and the global score. Ultimately, we show that untrimmed video classification models can be used as stepping stone for temporal detection. 
Our method wins runner-up prize in Ac-tivtiyNet Detection challenge 2016.","internal_url":"https://www.academia.edu/27106383/Untrimmed_Video_Classification_for_Activity_Detection_submission_to_ActivityNet_Challenge","translated_internal_url":"","created_at":"2016-07-19T11:10:29.898-07:00","preview_url":null,"current_user_can_edit":null,"current_user_is_owner":null,"owner_id":366407,"coauthors_can_edit":true,"document_type":"draft","co_author_tags":[{"id":22552808,"work_id":27106383,"tagging_user_id":366407,"tagged_user_id":4032373,"co_author_invite_id":null,"email":"g***4@gmail.com","display_order":1,"name":"Gurkirt Singh","title":"Untrimmed Video Classification for Activity Detection: submission to ActivityNet Challenge"},{"id":22552809,"work_id":27106383,"tagging_user_id":366407,"tagged_user_id":366407,"co_author_invite_id":5034152,"email":"f***n@brookes.ac.uk","affiliation":"Oxford Brookes University","display_order":2,"name":"Fabio Cuzzolin","title":"Untrimmed Video Classification for Activity Detection: submission to ActivityNet Challenge"}],"downloadable_attachments":[{"id":47357173,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/47357173/thumbnails/1.jpg","file_name":"1607.01979v2.pdf","download_url":"https://www.academia.edu/attachments/47357173/download_file","bulk_download_file_name":"Untrimmed_Video_Classification_for_Activ.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/47357173/1607.01979v2-libre.pdf?1468952065=\u0026response-content-disposition=attachment%3B+filename%3DUntrimmed_Video_Classification_for_Activ.pdf\u0026Expires=1743344607\u0026Signature=Busfa~LpDCenQvuHmraO8c51dot-56zMVlh9~UikjISXCKTITTUBvVNLYUTxvTge1WEyWCqyAWh70iKfCGEjG4hFVRmlHZXOrYdrXzDjplL0A1e3gBW19axR0eK~uAqxbaNpWsU4w7hw9zLmtAV7dplMjGGWFgzYahtSTXFVrK5IgqZLwwM3rimzPxVacl98q~SIxtjXGWFVvmnzeHJ49e5N0-7zw~YDM3WGon80IumLDT-b1FgE-b-c8JCguFe-gS8Pk6If2lOIHj13oBLFGvqd0Elj2OYXzV5M61jxn7~6Z76oxMd7397HJf2dV15MGoptQ1uQY0IcGL5DOhqo0A__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"slug":"Untrimmed_Video_Classification_for_Activity_Detection_submission_to_ActivityNet_Challenge","translated_slug":"","page_count":4,"language":"en","content_type":"Work","summary":"Current state-of-the-art human activity recognition is fo-cused on the classification of temporally trimmed videos in which only one action occurs per frame. We propose a simple, yet effective, method for the temporal detection of activities in temporally untrimmed videos with the help of untrimmed classification. Firstly, our model predicts the top k labels for each untrimmed video by analysing global video-level features. Secondly, frame-level binary classification is combined with dynamic programming to generate the temporally trimmed activity proposals. Finally, each proposal is assigned a label based on the global label, and scored with the score of the temporal activity proposal and the global score. Ultimately, we show that untrimmed video classification models can be used as stepping stone for temporal detection. 
Our method wins runner-up prize in Ac-tivtiyNet Detection challenge 2016.","owner":{"id":366407,"first_name":"Fabio","middle_initials":null,"last_name":"Cuzzolin","page_name":"FabioCuzzolin","domain_name":"oxfordbrookes","created_at":"2011-03-17T00:43:31.195-07:00","display_name":"Fabio Cuzzolin","url":"https://oxfordbrookes.academia.edu/FabioCuzzolin"},"attachments":[{"id":47357173,"title":"","file_type":"pdf","scribd_thumbnail_url":"https://attachments.academia-assets.com/47357173/thumbnails/1.jpg","file_name":"1607.01979v2.pdf","download_url":"https://www.academia.edu/attachments/47357173/download_file","bulk_download_file_name":"Untrimmed_Video_Classification_for_Activ.pdf","bulk_download_url":"https://d1wqtxts1xzle7.cloudfront.net/47357173/1607.01979v2-libre.pdf?1468952065=\u0026response-content-disposition=attachment%3B+filename%3DUntrimmed_Video_Classification_for_Activ.pdf\u0026Expires=1743344607\u0026Signature=Busfa~LpDCenQvuHmraO8c51dot-56zMVlh9~UikjISXCKTITTUBvVNLYUTxvTge1WEyWCqyAWh70iKfCGEjG4hFVRmlHZXOrYdrXzDjplL0A1e3gBW19axR0eK~uAqxbaNpWsU4w7hw9zLmtAV7dplMjGGWFgzYahtSTXFVrK5IgqZLwwM3rimzPxVacl98q~SIxtjXGWFVvmnzeHJ49e5N0-7zw~YDM3WGon80IumLDT-b1FgE-b-c8JCguFe-gS8Pk6If2lOIHj13oBLFGvqd0Elj2OYXzV5M61jxn7~6Z76oxMd7397HJf2dV15MGoptQ1uQY0IcGL5DOhqo0A__\u0026Key-Pair-Id=APKAJLOHF5GGSLRBV4ZA"}],"research_interests":[{"id":854,"name":"Computer Vision","url":"https://www.academia.edu/Documents/in/Computer_Vision"},{"id":2008,"name":"Machine Learning","url":"https://www.academia.edu/Documents/in/Machine_Learning"},{"id":7937,"name":"Image Recognition (Computer Vision)","url":"https://www.academia.edu/Documents/in/Image_Recognition_Computer_Vision_"},{"id":15665,"name":"Video Processing","url":"https://www.academia.edu/Documents/in/Video_Processing"},{"id":17701,"name":"Gesture Recognition","url":"https://www.academia.edu/Documents/in/Gesture_Recognition"},{"id":42835,"name":"Video Analysis","url":"https://www.academia.edu/Documents/in/Video_Analysis"},{"id":90270,"name":"Action Recognition","url":"https://www.academia.edu/Documents/in/Action_Recognition"},{"id":315678,"name":"Human Action Recognition","url":"https://www.academia.edu/Documents/in/Human_Action_Recognition"}],"urls":[{"id":7563963,"url":"https://arxiv.org/abs/1607.01979"}]}, dispatcherData: dispatcherData }); $(this).data('initialized', true); } }); $a.trackClickSource(".js-work-strip-work-link", "profile_work_strip") if (true) { Aedu.setUpFigureCarousel('profile-work-27106383-figures'); } }); </script> </div></div></div><script> require.config({ waitSeconds: 90 })(["https://a.academia-assets.com/assets/wow_profile-a9bf3a2bc8c89fa2a77156577594264ee8a0f214d74241bc0fcd3f69f8d107ac.js","https://a.academia-assets.com/assets/google_contacts-0dfb882d836b94dbcb4a2d123d6933fc9533eda5be911641f20b4eb428429600.js"], function() { // from javascript_helper.rb $('.js-google-connect-button').click(function(e) { e.preventDefault(); GoogleContacts.authorize_and_show_contacts(); Aedu.Dismissibles.recordClickthrough("WowProfileImportContactsPrompt"); }); $('.js-update-biography-button').click(function(e) { e.preventDefault(); Aedu.Dismissibles.recordClickthrough("UpdateUserBiographyPrompt"); $.ajax({ url: $r.api_v0_profiles_update_about_path({ subdomain_param: 'api', about: "", }), type: 'PUT', success: function(response) { location.reload(); } }); }); $('.js-work-creator-button').click(function (e) { e.preventDefault(); window.location = $r.upload_funnel_document_path({ source: encodeURIComponent(""), }); }); 
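The figures above show piece-wise constant proposals produced by DP optimisation over the frame-level binary classifier scores. A minimal Viterbi-style sketch of that idea follows, assuming a unary cost of 1 - p for "activity" and p for "background" plus a switch penalty `lam` for each label change; these cost choices and the `segment_activity` name are illustrative assumptions, since the exact formulation used in the submission is not given in the abstract.

```python
# Illustrative sketch of DP-based temporal proposal generation: choose a
# binary per-frame labelling that follows the activity scores while paying
# a penalty `lam` per label switch, so the optimum is piece-wise constant.
# Unary costs and `lam` are assumed values, not the submission's exact ones.
def segment_activity(scores, lam=2.0):
    """scores: per-frame activity probabilities in [0, 1].
    Returns a list of (start, end) inclusive frame-index pairs."""
    T = len(scores)
    INF = float("inf")
    # cost[t][k]: best cost of frames 0..t with frame t labelled k
    # (k = 1 means "activity"); back[t][k] stores the previous label.
    cost = [[INF, INF] for _ in range(T)]
    back = [[0, 0] for _ in range(T)]
    cost[0][0], cost[0][1] = scores[0], 1.0 - scores[0]
    for t in range(1, T):
        unary = (scores[t], 1.0 - scores[t])  # cost of labels (0, 1)
        for k in (0, 1):
            stay = cost[t - 1][k]
            switch = cost[t - 1][1 - k] + lam
            if stay <= switch:
                cost[t][k], back[t][k] = stay + unary[k], k
            else:
                cost[t][k], back[t][k] = switch + unary[k], 1 - k
    # Backtrack the optimal labelling, then read off contiguous 1-runs.
    k = 0 if cost[T - 1][0] <= cost[T - 1][1] else 1
    labels = [0] * T
    for t in range(T - 1, -1, -1):
        labels[t] = k
        k = back[t][k]
    segments, start = [], None
    for t, y in enumerate(labels + [0]):  # sentinel closes a trailing run
        if y and start is None:
            start = t
        elif not y and start is not None:
            segments.append((start, t - 1))
            start = None
    return segments
```

Each recovered segment can then inherit one of the video-level top-k labels and be scored by combining the segment score with the global score, as the abstract describes; larger `lam` values merge nearby activity bursts into fewer, longer proposals.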