Computational Imaging XXI (COIMG)
This annual conference highlights the interplay between mathematical theory, physical models and computational algorithms that enable effective imaging systems.

Keywords: Inverse Problems, Image Reconstruction, Image Analysis, Denoising, Model-based Imaging
Part of the IS&T Electronic Imaging 2023 symposium. Page: https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Conference/C_COIMG.aspx
margin: 0; padding: 0; } #menuv_NAV ul { position: relative; z-index: 597; float: left; } #menuv_NAV ul li { float: left; min-height: 1px; line-height: 1.5em; vertical-align: middle; } #menuv_NAV ul li.hover, #menuv_NAV ul li:hover { position: relative; z-index: 599; cursor: default; } #menuv_NAV ul ul { visibility: hidden; position: absolute; top: 100%; left: 0; z-index: 598; width: 100%; } #menuv_NAV ul ul li { float: none; } #menuv_NAV ul ul, #menuv_NAV ul ul ul { top: -2px; left: 99%; } #menuv_NAV ul li:hover > ul { visibility: visible; } #menuv_NAV ul li { float: none; } #menuv_NAV a { display: block; font-weight: 400 !important; } /* Custom CSS Styles */ #menuv_NAV { font-family: 'Jost', sans-serif; text-transform: uppercase; font-size: 13px; } #menuv_NAV:after, #menuv_NAV ul:after { content: ''; display: block; clear: both; } #menuv_NAV ul { background: #EEEEEE; border: 0px solid #aaaaaa; padding: 4px; width: 100%; } #menuv_NAV ul li { color: #0C0C0C; position: relative; } #menuv_NAV ul li.hover, #menuv_NAV ul li:hover { background: #cccccc; background: -moz-linear-gradient(#cccccc 0%, #cccccc100%); background: -webkit-gradient(linear, left top, left bottom, color-stop(0%, #9f9f9f), color-stop(100%, #cccccc )); background: -webkit-linear-gradient(#cccccc 0%, #cccccc 100%); background: linear-gradient(#cccccc 0%, #cccccc 100%); color: #FFF; } #menuv_NAV ul li.hover > a, #menuv_NAV ul li:hover > a { color: #000; border: 0px solid #cccccc; } #menuv_NAV ul ul { width: 650px; } #menuv_NAV a { border: 0px solid transparent; padding: 3px 10px; } #menuv_NAV a:link, #menuv_NAV a:visited { color: #0C0C0C; text-decoration: none; } #menuv_NAV a:hover { background: #cccccc; background: -moz-linear-gradient(#cccccc 0%, #cccccc 100%); background: -webkit-gradient(linear, left top, left bottom, color-stop(0%, #cccccc ), color-stop(100%, #cccccc )); background: -webkit-linear-gradient(#cccccc 0%, #cccccc 100%); background: linear-gradient(#cccccc 0%, #cccccc 100%); color: #FFF; } #menuv_NAV a:active { color: #ffa500; } #menuv_NAV .has-sub:hover > a:after, #menuv_NAV .has-sub.hover > a:after { border-color: transparent transparent transparent #FFF; } #menuv_NAV .has-sub > a:after { content: ''; width: 0px; height: 0px; border-style: solid; border-width: 0px 0px 0px 0px; border-color: transparent transparent transparent #808080; position: absolute; top: 50%; right: 5%; margin-top: -4px; -webkit-transform: rotate(360deg); } </style> <div id="menuv-container"> <div id="menuv_NAV"> <ul> <li> </li> <li><a href="https://www.imaging.org/IST/Conferences/EI/EI2023/Attend___Register/IST/Conferences/EI/EI2023/Attend.aspx" target="_blank"><span style="color: #d2232a;">REGISTER</span></a></li> </ul> <ul> <li><a href="https://www.imaging.org/IST/Conferences/EI/EI2023/IST/Conferences/EI/EI2023/EI2023.aspx?EntryCCO=1#EntryCCO'">EI Home/About</a> <ul> <li><a href="https://www.imaging.org/IST/Conferences/EI/EI2023/IST/Conferences/EI/EI2023/EI2023.aspx?EntryCCO=1#EntryCCO">Home</a></li> <li><a href="https://www.imaging.org/IST/Conferences/EI/EI2023/IST/Conferences/EI/EI2023/EI2023.aspx?EntryCCO=2#EntryCCO'">At-a-Glance</a></li> <li><a href="https://www.imaging.org/IST/Conferences/EI/EI2023/IST/Conferences/EI/EI2023/EI2023.aspx?EntryCCO=3#EntryCCO">Awards</a> </li> <li><a href="https://www.imaging.org/IST/Conferences/EI/EI2023/IST/Conferences/EI/EI2023/EI2023.aspx?EntryCCO=4#EntryCCO">EI History</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/TAB_Code_of_Conduct.aspx" target="_blank">Code of 
Conduct</a></li> <li><a href="http://www.imaging.org/IST/IST/About/Press_Releases.aspx" target="_blank">Press Releases</a> </li> </ul> </li> </ul> <ul> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Program.aspx">Symposium Program</a> <ul> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Program.aspx">EI Program</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Program.aspx?ProgramCCO=2#ProgramCCO">Symposium Plenary Speakers</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Program.aspx?ProgramCCO=3#ProgramCCO">EI Conferences</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Program.aspx?ProgramCCO=4#ProgramCCO">Conference Keynotes</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Program.aspx?ProgramCCO=5#ProgramCCO">Short Courses</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Program.aspx?ProgramCCO=6#ProgramCCO">Demonstration & Poster Sessions</a></li> <li><a href="https://www.imaging.org/IST/Conferences/EI/EI2023/IST/Conferences/EI/EI2023/EI2023.aspx?EntryCCO=2#EntryCCO'">Program At-a-Glance</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Program.aspx?ProgramCCO=7#ProgramCCO">Author Index</a></li> </ul> </li> </ul> <ul> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Program.aspx?ProgramCCO=5#ProgramCCO">Short Courses</a></li> </ul> <ul> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Program.aspx?ProgramCCO=3#ProgramCCO" class="top_parent">Conferences</a> <ul> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Program.aspx?ProgramCCO=3#ProgramCCO">EI Conferences</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Conference/C_3DMP.aspx">3D Imaging and Applications 2023 (3DIA)</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Conference/C_AVM.aspx">Autonomous Vehicles and Machines 2023 (AVM)</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Conference/C_COLOR.aspx">Color Imaging XXVIII: Displaying, Processing, Hardcopy, and Applications (COLOR)</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Conference/C_COIMG.aspx">Computational Imaging XXI (COIMG)</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Conference/C_CVAA.aspx">Computer Vision and Image Analysis of Art 2023 (CVAA)</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Conference/C_ERVR.aspx">Engineering Reality of Virtual Reality 2023 (ERVR)</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Conference/C_HPCI.aspx">High Performance Computing for Imaging 2023 (HPCI)</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Conference/C_HVEI.aspx">Human Vision and Electronic Imaging 2023 (HVEI)</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Conference/C_IPAS.aspx">Image Processing: Algorithms and Systems XXI (IPAS)</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Conference/C_IQSP.aspx">Image Quality and System Performance XX (IQSP)</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Conference/C_IMAGE.aspx">Imaging and Multimedia Analytics at the Edge 2023 (IMAGE)</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Conference/C_ISS.aspx">Imaging Sensors and Systems 2023 (ISS)</a></li> 
<li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Conference/C_IRIACV.aspx">Intelligent Robotics and Industrial Applications using Computer Vision 2023 (IRIACV)</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Conference/C_MLSI.aspx">Machine Learning for Scientific Imaging 2023 (MLSI)</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Conference/C_MWSF.aspx">Media Watermarking, Security, and Forensics 2023 (MWSF)</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Conference/C_MOBMU.aspx">Mobile Devices and Multimedia: Enabling Technologies, Algorithms, and Applications 2023 (MOBMU)</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Conference/C_SDA.aspx">Stereoscopic Displays and Applications XXXIV (SD&A)</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Conference/C_VDA.aspx">Visualization and Data Analysis 2023 (VDA)</a></li> </ul> </li> </ul> <ul> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Program.aspx?ProgramCCO=2#ProgramCCO">Symposium Plenary Speakers</a></li> </ul> <ul> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/AuthorSubmit.aspx" class="top_parent">Author/Submit</a> <ul> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/AuthorSubmit.aspx?Author_Info=1">Submit How-to</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/AuthorSubmit.aspx?Author_Info=2">Accepted: Next Steps</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Program.aspx?ProgramCCO=6#ProgramCCO">Demonstration Session</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/AuthorSubmit.aspx?Author_Info=3">Publication FAQ</a></li> <li><a href="https://www.imaging.org/PDFS/Conferences/ElectronicImaging/EI_InvitationLetterRequest_Form_Fillable.pdf">Visas and Letters of Invitation</a></li> </ul> </li> </ul> <ul> <li><a href="https://www.imaging.org/IST/Conferences/EI/EI2023/Attend___Register/IST/Conferences/EI/EI2023/Attend.aspx" class="top_parent">Attend/Register</a> <ul> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Attend.aspx">Registration & Fees</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Attend.aspx?Attendee_Information=2#Attendee_Information">Logistics</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Attend.aspx?Attendee_Information=3#Attendee_Information">Why Attend</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Attend.aspx?Attendee_Information=3#JustificationLetter.aspx">Justify Attendance</a> </li> <li><a href="https://www.imaging.org/PDFS/Conferences/ElectronicImaging/EI_InvitationLetterRequest_Form_Fillable.pdf">Visas and Letters of Invitation</a></li> </ul> </li> </ul> <ul> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/ExhibitSponsor.aspx" class="top_parent">Exhibit/Sponsor</a> <ul> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/ExhibitSponsor.aspx">Exhibition & Sponsorship Opportunities</a> </li> </ul> </li> </ul> <ul> <li><a href="https://www.imaging.org/IST/Conferences/EI/EI2023/For_Students/IST/Conferences/EI/EI2023/For_Students.aspx" class="top_parent">For Students</a> <ul> <li><a href="https://www.imaging.org/IST/Conferences/EI/EI2023/For_Students/IST/Conferences/EI/EI2023/For_Students.aspx" class="top_parent">Student Focus</a></li> <li><a 
href="https://www.imaging.org/IST/Conferences/EI/EI2023/For_Students/IST/Conferences/EI/EI2023/For_Students.aspx?Student_Focus_Tabs=2#Student_Focus_Tabs">Student Showcase</a></li> </ul> </li> </ul> </div> <!-- end the menuv-container div --> </div> <!-- end the menuv div --></div><div class='ContentHtml'><div class="leftbox"> <table style="text-align: center; margin-left: auto; margin-right: auto;" width="100%" border="0"> <tbody> <tr> <td colspan="2" 7px;"valign="middle" align="center"> </td> </tr> <tr> <td style="height: 19px;" valign="top" align="right"> <!--- begin LinkedIn Share ---> <script src="https://platform.linkedin.com/in.js" type="text/javascript">lang: en_US</script> <script type="IN/Share" data-url="https://www.linkedin.com"></script> <!--- End LinkedIn Share ---> </td> <td style="height: 19px;" valign="top" align="left"> <!--- begin Twitter Share ---> <a href="https://twitter.com/share" class="twitter-share-button" data-count="none" data-hashtags="EI2023">Tweet</a> <script>!function(d,s,id){var js,fjs=d.getElementsByTagName(s)[0],p=/^http:/.test(d.location)?'http':'https';if(!d.getElementById(id)){js=d.createElement(s);js.id=id;js.src=p+'://platform.twitter.com/widgets.js';fjs.parentNode.insertBefore(js,fjs);}}(document, 'script', 'twitter-wjs');</script> <!--- end Twitter Share ---> </td> </tr> <tr> <td colspan="2" valign="middle" align="center"> <!--- begin Twitter Follow ---> <a href="https://twitter.com/ElectroImaging" class="twitter-follow-button" data-show-count="false">Follow @ElectroImaging</a> <script>!function(d,s,id){var js,fjs=d.getElementsByTagName(s)[0],p=/^http:/.test(d.location)?'http':'https';if(!d.getElementById(id)){js=d.createElement(s);js.id=id;js.src=p+'://platform.twitter.com/widgets.js';fjs.parentNode.insertBefore(js,fjs);}}(document, 'script', 'twitter-wjs');</script> <!--- end Twitter Follow ---><br> </td> </tr> </tbody> </table> </div> <br></div><div class='ContentHtml'><style> .greybox { background-color: #f2f2f2; text-align: center; } table.ImportantDates { color: #000000; margin-left: auto; margin-right: auto; border: 4px solid #f2f2f2; padding-top: 3px; padding-bottom: 3px; padding-left: 1px; padding-right: 1px; font-family: Jost, sans-serif; font-size: 10px; font-weight: 300; line-height: 1.1; vertical-align: top; } .ImpDateDescription{ padding-top: 3px; padding-bottom: 3px; text-align: left; font-weight: 400; vertical-align: top; } .ImpDateSubDescription{ padding-top: 3px; padding-bottom: 3px; text-indent: -6px; padding-left: 6px; text-align: left; font-weight: 400; color: #7f7f7f; vertical-align: top; } .impdatedate { padding-top: 3px; padding-bottom: 3px; text-align: center; font-weight: 400; color: #7f7f7f; font-family: Jost, sans-serif; vertical-align: top; text-align: center;" } .impdatedatesub { padding-top: 3px; padding-bottom: 3px; text-align: center; font-weight: 400; color: #7f7f7f; font-family: Jost, sans-serif; vertical-align: top; text-align: center;" } </style> <div class="leftbox"> <div class="greybox"> <a name="Deadlines" id="Deadlines"></a> <table class="ImportantDates" align="center"> <thead> <tr> <td colspan="2" style="text-align: center; white-space: nowrap;"><span style="font-weight: 500; font-size: 14px; font-family: Jost, sans-serif; color: #d2232a;">IMPORTANT DATES<br /> </span> <span style="font-size: 10px; color: #d2232a;"><em>Dates currently being confirmed; check back.</em></span> </td> </tr> <tr> <td colspan="2"> </td> </tr> </thead> <tbody> <tr> <td class="ImpDateDescription"> <br /> </td> <td 
class="impdate"><span style="font-weight: 500;">2022</span></td> </tr> <tr> <td class="ImpDateDescription">Call for Papers Announced</td> <td class="impdate">2 May</td> </tr> <tr> <td class="ImpDateDescription">Journal-first (JIST/JPI) Submissions <br /> </td> <td class="impdate"><br /> </td> </tr> <tr> <td class="ImpDateSubDescription">∙ Submission site Opens</td> <td class="impdatedatesub">2 May </td> </tr> <tr> <td class="ImpDateSubDescription">∙ Journal-first (JIST/JPI) Submissions Due</td> <td class="impdatedatesub">1 Aug</td> </tr> <tr> <td class="ImpDateSubDescription">∙ Final Journal-first manuscripts due</td> <td class="impdatedatesub">28 Oct</td> </tr> <tr> <td class="ImpDateDescription">Conference Papers Submissions</td> <td class="impdate"><br /> </td> </tr> <tr> <td class="ImpDateSubDescription">∙ Abstract Submission Opens</td> <td class="impdatedatesub">1 June</td> </tr> <tr> <td class="ImpDateSubDescription">∙ Priority Decision Submission Ends</td> <td class="impdatedatesub">15 July</td> </tr> <tr> <td class="ImpDateSubDescription">∙ Extended Submission Ends</td> <td class="impdatedatesub"> 19 Sept</td> </tr> <tr> <td class="ImpDateSubDescription">∙ FastTrack Conference Proceedings Manuscripts Due</td> <td class="impdatedatesub">25 Dec </td> </tr> <tr> <td class="ImpDateSubDescription">∙ All Outstanding Proceedings Manuscripts Due<br /> </td> <td class="impdatedatesub" style="white-space: nowrap;"> 6 Feb 2023</td> </tr> <tr> <td class="ImpDateDescription">Registration Opens</td> <td class="impdate" style="white-space: nowrap;">1 Dec</td> </tr> <tr> <td class="ImpDateDescription">Demonstration Applications Due</td> <td class="impdate">19 Dec</td> </tr> <tr> <td class="ImpDateDescription">Early Registration Ends</td> <td class="impdate">18 Dec</td> </tr> <tr> <td class="ImpDateDescription"><br /> </td> <td class="impdate" style="text-align: center;"><br /> <span style="font-weight: 500;">2023</span></td> </tr> <tr> <td class="ImpDateDescription">Hotel Reservation Deadline</td> <td class="impdate">6 Jan</td> </tr> <tr> <td class="ImpDateDescription">Symposium begins<br /> </td> <td class="impdate">15 Jan<br /> </td> </tr> <tr> <td class="ImpDateDescription"><br /> </td> <td class="impdate"><br /> </td> </tr> </tbody> </table> </div> </div></div></span> <div translate="yes"> </div> </div> </div> </div> </div><div id="ctl01_TemplateBody_WebPartManager1_gwpciCornerArt_ciCornerArt_downloadContainer" style="display:none;"> <input type="hidden" name="ctl01$TemplateBody$WebPartManager1$gwpciCornerArt$ciCornerArt$HiddenDownloadPathField" id="ctl01_TemplateBody_WebPartManager1_gwpciCornerArt_ciCornerArt_HiddenDownloadPathField" /><input type="submit" name="ctl01$TemplateBody$WebPartManager1$gwpciCornerArt$ciCornerArt$downloadButton" value="Download Path" id="ctl01_TemplateBody_WebPartManager1_gwpciCornerArt_ciCornerArt_downloadButton" style="display:none" /> </div></div> </div> <div class="iMIS-WebPart"> <div id="ste_container_ciSponsors_711904745b114fcda30b29f874f3130e" class="ContentItemContainer"><div id="ctl01_TemplateBody_WebPartManager1_gwpciSponsors_711904745b114fcda30b29f874f3130e_ciSponsors_711904745b114fcda30b29f874f3130e_Panel_Sponsors"> </div></div> </div> </div> <div id="ctl01_TemplateBody_ContentPage1_downloadContainer" style="display:none;"> <input type="hidden" name="ctl01$TemplateBody$ContentPage1$HiddenDownloadPathField" id="ctl01_TemplateBody_ContentPage1_HiddenDownloadPathField" /><input type="submit" name="ctl01$TemplateBody$ContentPage1$downloadButton" 
value="Download Path" id="ctl01_TemplateBody_ContentPage1_downloadButton" style="display:none" /> </div></div> </div> <div class="col-sm-9"> <div class="ContentItemContainer"> <div id="WebPartZone2_Page1" class="WebPartZone "> <div class="iMIS-WebPart"> <div id="ste_container_ciConfCCO" class="ContentItemContainer"><div class="panel "> <div id="ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO__Head" class="panel-heading"> </div><div id="ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO__BodyContainer" class="panel-body-container"> <div id="ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO__Body" class="panel-body"> <div id="ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_MainContentControl" class="cco tabs-wrapper tabs-horizontal tabs-top"> <div id="ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_radTab_Top" class="RadTabStrip RadTabStrip_MetroTouch RadTabStripTop_MetroTouch RadTabStripTop RadTabStripTop_MetroTouch_Baseline"> <div class="rtsLevel rtsLevel1"> <ul class="rtsUL"><li class="rtsLI rtsFirst"><a class="rtsLink rtsBefore" href="#"><span class="rtsOut"><span class="rtsIn"><span class="rtsTxt">About COIMG 2023</span></span></span></a></li><li class="rtsLI"><a class="rtsLink rtsSelected" href="#"><span class="rtsOut"><span class="rtsIn"><span class="rtsTxt">COIMG Program</span></span></span></a></li><li class="rtsLI"><a class="rtsLink rtsAfter" href="#"><span class="rtsOut"><span class="rtsIn"><span class="rtsTxt">For COIMG Authors</span></span></span></a></li><li class="rtsLI rtsLast"><a class="rtsLink" href="#"><span class="rtsOut"><span class="rtsIn"><span class="rtsTxt">COIMG History/Proceedings</span></span></span></a></li></ul> </div><input id="ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_radTab_Top_ClientState" name="ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_radTab_Top_ClientState" type="hidden" /> </div> <div id="ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_radPage" class="RadMultiPage RadMultiPage_Default"> <div id="ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_Page_1" class="rmpView rmpHidden"> <div class="ContentTabbedDisplay AddPadding"> <p class="AsiWarning">No content found</p> </div> </div><div id="ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_Page_2" class="rmpView"> <div class="ContentWizardDisplay ClearFix"><div> <div class="row"> <div class="col-sm-12"> <div id="ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_Zone1PlaceHolder" class="WebPartZone"> <div id="ste_container_ConferenceHeading" class="ContentItemContainer"><style type="text/css"> /*this is the all-purpose callout. It is behind the keynotes It is grey*/ .callout{ background-color:#f2f2f2; padding-top: 10px; padding-right: 10px; padding-bottom: 10px; padding-left: 10px; } /*this is the callout for panels and special events. It is yellow*/ .coloredcallout{ background-color: #fff9e6; padding-top: 10px; padding-right: 10px; padding-bottom: 10px; padding-left: 10px; } /*this is the plenary callout. 
Computational Imaging XXI

Monday 16 January 2023

KEYNOTE: Neutron Imaging Beyond Traditional Radiography (M1)
Session Chairs: Alexander Long, Los Alamos National Laboratory (United States), and Sven Vogel, Los Alamos National Laboratory (United States)
8:45 – 10:20 AM
Market Street

8:45
Conference Welcome

8:50  COIMG-129
KEYNOTE: Advanced neutron imaging, Markus Strobl, Paul Scherrer Institut (PSI) (Switzerland)

Prof. Dr. Markus Strobl leads the Applied Materials Group (AMG) at the Paul Scherrer Institut (PSI) in Switzerland. AMG is part of the Laboratory for Neutron Scattering and Imaging (LNS) in PSI's Research with Neutrons and Muons (NUM) division. The group operates two dedicated neutron imaging facilities and the neutron strain scanner (diffractometer) POLDI for users from scientific institutions and industry; it also provides complementary X-ray imaging (in-situ bi-modal) and has dedicated beamtimes for imaging studies at the test beamline BOA, which delivers an intense cold polarized neutron beam. Strobl has over 230 publications in the field.

Abstract: The last decade in neutron imaging saw extensive activity in method development. This was only partially due to novel pulsed neutron sources and the new imaging instruments established at them; at state-of-the-art instruments at continuous sources, wavelength resolution, polarization, and grating interferometry, to name a few, also changed the field of neutron imaging lastingly. These methodical developments, together with increased accuracy and the demand for quantification, created a need for a more detailed and concise description of the neutron interactions being utilized, in particular with respect to neutron scattering. The scattering aspect of neutron imaging has thus finally gained significant weight, which on the one hand puts additional demands on neutron imaging scientists and on the other hand deepens the integration of neutron imaging within the suite of neutron science techniques and their applications at large-scale neutron sources. Consequently, the talk outlines and underlines the basic principles that must be embraced to access these novel methods, interactions, and information. A number of interactions that provide valuable new information in novel techniques are also known to cause artifacts in conventional applications; these matter all the more given the better resolution achieved nowadays and the correspondingly higher demands for quantification.
9:20  COIMG-130
Material decomposition in neutron time-of-flight radiography, Thilo Balke¹, Alexander M. Long², Sven C. Vogel², Brendt Wohlberg², and Charles A. Bouman¹; ¹Purdue University and ²Los Alamos National Laboratory (United States)

Abstract: As a pulsed neutron beam travels through a sample, neutrons arrive at the imaging detector over a range of times according to their velocity, i.e., kinetic energy. The radiographic measurements at the detector are thus inherently energy resolved. Since the nuclei in the sample have characteristic absorption spectra that are isotope specific, the hyperspectral measurements contain information about the density distribution of the isotopes in the sample. We present a methodology to estimate the isotopic distribution in the sample. However, this task is very challenging due to the extremely large amount of noise even with long exposures, and due to the large number of unknown parameters such as the neutron beam flux, background events, and the finite pulse width, all of which must be properly accounted for to achieve quantitatively accurate density estimates. Our approach deals with the extreme noise by modeling the system in its native, non-linear domain, where the measurements can be modeled directly as Poisson counts. The estimation of the large number of nuisance parameters is achieved through a novel reduction in dimensionality. We demonstrate our approach with experimental data from FP-5 at LANSCE as well as Monte Carlo simulations. Our reconstructed densities in 2D (radiography) and 3D (tomography) are quantitatively verified through multiple means, such as known ground-truth samples, comparison to a state-of-the-art nuclear code, and mass spectrometry.
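As a rough illustration of the kind of estimation problem described above, here is a minimal sketch assuming a Beer-Lambert transmission model with synthetic resonance spectra and Poisson counting noise; the spectra, flux, background, and units are invented placeholders, not the authors' model or LANSCE data.

    import numpy as np
    from scipy.optimize import minimize

    rng = np.random.default_rng(0)
    E = np.linspace(1.0, 100.0, 400)                 # energy bins (eV), toy grid

    # Hypothetical resonance-like cross sections per isotope (toy units, cm^2/mol).
    def lorentzian(E, E0, w):
        return 1.0 / (1.0 + ((E - E0) / w) ** 2)

    sigma = np.stack([40.0 * lorentzian(E, 20.0, 1.5),   # "isotope 1"
                      25.0 * lorentzian(E, 60.0, 2.5)])  # "isotope 2"

    N_true = np.array([0.05, 0.08])                  # areal densities (mol/cm^2)
    flux, bg = 5e3, 50.0                             # open-beam counts, background
    y = rng.poisson(flux * np.exp(-N_true @ sigma) + bg)  # measured counts

    def neg_log_likelihood(N):
        lam = flux * np.exp(-N @ sigma) + bg
        return np.sum(lam - y * np.log(lam))         # Poisson NLL up to a constant

    res = minimize(neg_log_likelihood, x0=np.array([0.01, 0.01]),
                   bounds=[(0.0, None)] * 2, method="L-BFGS-B")
    print("estimated areal densities:", res.x)       # close to N_true

Running this recovers areal densities near N_true; in the talk's setting the nuisance parameters (flux, background, pulse shape) are themselves unknown, which is what the dimensionality-reduction step addresses.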
9:40  COIMG-131
Artificial intelligence-driven hyperspectral neutron computed tomography (HSnCT) systems, Shimin Tang¹, Diyu Yang², Mohammad S. Chowdhury², Singanallur Venkatakrishnan¹, Hassina Z. Bilheux¹, Charles A. Bouman², Gregery T. Buzzard², Jean-Christophe Bilheux¹, George J. Nelson³, Maria Cekanova⁴, and Ray Gregory¹; ¹Oak Ridge National Laboratory, ²Purdue University, ³University of Alabama in Huntsville, and ⁴Integrity Laboratories (United States)

Abstract: Hyperspectral neutron computed tomography (HSnCT) is a technique that can provide information complementary to X-ray computed tomography (CT). Recently, wavelength-dependent imaging beamlines that can measure Bragg-edge and resonance spectra have been built at spallation neutron sources. The traditional approach to reconstruction involves acquiring projection data by rotating the object over a predefined set of angles about a single axis. Once all the data have been collected (typically over the span of a few days), the volume is reconstructed using an analytical algorithm such as filtered back projection (FBP). However, this method is not efficient for pulsed sources because: 1) each projection is time-consuming, usually requiring several hours of acquisition to reach a reasonable signal-to-noise ratio (SNR); 2) the projection directions are not adapted to the sample features; and 3) the reconstruction is only done at the end of the experiment, at which point several days' worth of measurement time has been spent and the result may still be a low-SNR, artifact-ridden reconstruction. In this talk, we present algorithms for an autonomous HSnCT system that acquires projection directions adaptively and assesses scan quality in real time during the measurement. Our method involves acquiring data and reconstructing with a model-based iterative reconstruction approach. After a few projections have been measured, the system uses an edge-alignment scanning-angle selection algorithm to dynamically decide the next projection direction. Simultaneously, a 3D convolutional neural network (CNN) estimates the reconstruction quality after each scan to determine whether the scan can be stopped. Our proposed method can dramatically decrease acquisition time for HSnCT scans while acquiring data from the most informative orientations, adapted to the object being measured. (See the adaptive-acquisition sketch after COIMG-132 below.)

10:00  COIMG-132
Enabling turnkey multiscale imaging/tomography of advanced materials with powerful and intuitive software, Adrian Brügger, Columbia University (United States)

Abstract: We present the work of the speaker's Columbia team, along with collaborators at Oak Ridge National Laboratory (ORNL) and Los Alamos National Laboratory (LANL), on novel methods to capture neutron imaging data at multiple length scales using a variety of complementary and simultaneous techniques. Traditional attenuation-based neutron imaging and tomography is generally limited by the real-space resolution of sensors, on the order of 50 microns. The full information volume of neutrons is only now being realized through the more widespread use of complementary techniques such as Bragg-edge imaging (BEI), neutron grating interferometry (nGI), and hyperspectral imaging. The development of a novel beamline, CUPI²D (the Complex, Unique and Powerful Imaging Instrument for Dynamics) at ORNL, presents a unique opportunity to fine-tune these new methods and make them accessible to a broad user base. This development must focus on delivering not only experimental capabilities to users, with turnkey workflows in experiment design and execution (sample preparation, doping, alignment, etc.), but most importantly data reduction and refinement tools (computed tomography, porosity measurement, strain mapping, etc.) that can be used by engineers and scientists outside the neutron scattering field. The success of new instruments invariably depends on the accessibility of the instrument's underlying software to a diverse user base.
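Returning to the autonomous HSnCT loop of COIMG-131 above: a toy sketch of the overall adaptive-acquisition structure, with a naive farthest-angle rule and a trivial quality score standing in for the paper's edge-alignment selection and CNN quality estimator (both placeholders are my inventions, not the authors' algorithms).

    import numpy as np

    def ang_dist(a, b):
        """Distance between projection angles on [0, 180) degrees."""
        d = abs(a - b) % 180.0
        return min(d, 180.0 - d)

    def next_angle(measured, candidates):
        """Greedy stand-in: pick the candidate farthest from every measured angle."""
        gaps = [min(ang_dist(c, m) for m in measured) for c in candidates]
        return candidates[int(np.argmax(gaps))]

    def quality_score(measured):
        """Placeholder for the CNN quality estimate (here: plain view count)."""
        return len(measured) / 60.0       # pretend ~60 good views saturate quality

    candidates = list(np.linspace(0.0, 179.0, 180))
    measured = [0.0]
    while quality_score(measured) < 0.25:  # stop once "quality" is adequate
        a = next_angle(measured, candidates)
        measured.append(a)                 # acquire the projection at angle a
        # ... run the model-based reconstruction and update the quality estimate ...
    print(f"acquired {len(measured)} views, e.g.", sorted(measured)[:5])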
10:20 – 10:50 AM Coffee Break

Neutron Imaging Beyond Traditional Radiography (M2)
Session Chairs: Alexander Long, Los Alamos National Laboratory (United States), and Sven Vogel, Los Alamos National Laboratory (United States)
10:50 AM – 12:30 PM
Market Street

10:50  COIMG-133
Characterization of irradiated nuclear transmutation fuel with neutron resonance imaging at LANSCE, Sven C. Vogel¹, Thilo Balke¹, Charles A. Bouman², Luca Capriotti³, Jason M. Harp⁴, Alexander M. Long¹, and Brendt Wohlberg¹; ¹Los Alamos National Laboratory, ²Purdue University, ³Idaho National Laboratory, and ⁴Oak Ridge National Laboratory (United States)

Abstract: Short-pulse neutron sources uniquely enable mapping the densities of isotopes in irradiated nuclear fuels, providing crucial information for the development of advanced fuel forms. We present results on a sample from an irradiated U-10wt%-1wt%Pd metallic fuel in which the densities of uranium isotopes were mapped in mol/cm² using neutron absorption resonances at the Los Alamos Neutron Science Center (LANSCE). Nuclear burn-up models exist that predict such isotope densities, and imaging methods that quantify them with good confidence are of paramount importance for developing this characterization tool and applying it to accelerate carbon-neutral nuclear fuel development.
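For reference, the mapped quantity, the areal density N_i of isotope i in mol/cm², enters the energy-resolved transmission through a Beer-Lambert law over the resonance cross sections (my notation, not the authors'):

    T(E) = exp( − Σ_i N_i · σ_i(E) )

so that, once flux and background are removed, an isolated resonance at energy E₀ yields N_i ≈ −ln T(E₀) / σ_i(E₀).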
11:10  COIMG-134
Advanced neutron imaging techniques at FRM II, Adrian Losko¹, Richi Kumar², Alexander M. Long³, Tobias Neuwirth¹, Simon Sebold¹, Lucas Sommer¹, Anton Tremsin⁴, Sven C. Vogel³, Alexander Wolfertz¹, and Michael Schulz¹; ¹Technical University Munich (Germany), ²Helmholtz-Zentrum Hereon GmbH (Germany), ³Los Alamos National Laboratory (United States), and ⁴University of California, Berkeley (United States)

Abstract: The neutron imaging group at FRM II pushes the limits of several advanced neutron imaging techniques, such as grating interferometry, multi-modal imaging, and imaging with polarized neutrons. Because of the limited neutron count rate in such applications, a compromise must frequently be made between data quality and measurement time. Moreover, quantitative evaluation is often complicated by background and side effects. In this presentation we discuss challenges and opportunities for improving data quality.

11:30  COIMG-135
Assessment of imaging properties of scintillators at FP60R for neutron imaging applications, Showera H. Haque, Stuart Miller, Stuart Baker, Katherine Walters, and Jesus Castaneda, Nevada National Security Site (United States)

Abstract: Time-gated neutron radiography provides a non-invasive way to image through dense objects, making it a favorable diagnostic for subcritical and other dynamic experiments. The Nevada National Security Site (NNSS) is investigating the use of pulsed neutron sources, such as those created by a dense plasma focus (DPF) device, to produce radiographs of dynamic experiments using deuterium-tritium neutrons. The fast-neutron beamline at the WNR facility at LANSCE is used to evaluate the imaging properties of various scintillators for future dynamic experiments. Using the accelerator's pulsed beam structure, we program our gated imaging camera to trigger repetitively at fixed neutron time-of-flight delays. By integrating many thousands of these 10 ns wide energy bands, we reproduce a dose equivalent to that produced in the experiment. We present radiographs of polyethylene rolled edges and tungsten step wedges taken at WNR Flight Path 60R using plastic slab, crystalline, and deposited scintillators.
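The time-of-flight gating arithmetic behind this scheme is simple: a neutron of kinetic energy E covers the flight-path length L at speed v = sqrt(2E/m), so a fixed trigger delay after the pulse selects an energy band. A minimal sketch, with the path length and energy band as assumed placeholder values, not the actual FP60R geometry:

    from math import sqrt

    M_N = 1.674927e-27          # neutron mass (kg)
    EV  = 1.602177e-19          # joules per eV

    def tof(E_eV, L_m):
        """Flight time (s) of a neutron with kinetic energy E_eV over L_m meters."""
        v = sqrt(2.0 * E_eV * EV / M_N)   # non-relativistic speed (m/s)
        return L_m / v

    L = 20.0                              # assumed flight-path length (m)
    t_fast = tof(2.0e6, L)                # arrival of 2 MeV neutrons
    t_slow = tof(1.0e6, L)                # arrival of 1 MeV neutrons
    print(f"gate opens {t_fast*1e6:.2f} us after the pulse, "
          f"width {(t_slow - t_fast)*1e9:.0f} ns for the 1-2 MeV band")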
11:50  COIMG-136
In-situ thermal neutron imaging of roots in soil at LSU Pennington Lab, Les Butler¹, Kyungmin Ham¹, J. Theodore Cremer², Randall Urdahl², Eugene Guan², Craig Brown², Allan Chen², Charles Gary², Michael Vincent³, and Charles Hartman³; ¹Louisiana State University, ²Adelphi Technology, Inc., and ³Refined Imaging LLC (United States)

Abstract: A radiographic/tomographic imaging system based on thermal neutron scatter imaging of the plant/soil system has been installed at the LSU Pennington Lab. Revealing details at the 1-micron scale, this plant-root imaging instrument combines interferometric grating optics, developed by Refined Imaging of Baton Rouge, with Adelphi Technology's portable thermal neutron source.
Theodore Cremer<sup>4</sup></span><span class="author_string" final_id="COIMG-137" onclick="toggle_me()" style="cursor: pointer;">; <sup>1</sup>Louisiana State University, <sup>2</sup>National Institute of Science and Technology (NIST), <sup>3</sup>Massachusetts Institute of Technology, and <sup>4</sup>Adelphi Technology, Inc (United States)</span><span class="abstract_link" final_id="COIMG-137" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="COIMG-137" id="abstract-COIMG-137" onclick="toggle_me()" style="display:none; cursor:pointer;">Access to combined X-ray/neutron imaging in the US currently exists only at the NIST Center for Neutron Research BT2 beamline. The proposed LSU-MIT project will double the US capacity and will have a much more powerful X-ray imaging system. The MIT neutron tomography system was recently upgraded by NIST researchers, close to specifications of worldwide national facility neutron imaging beamlines. MIT beamline specifications: 1. Thermal neutron beam size of 50 mm x 75 mm at the sample, small beam angular divergence (L/D = 500); 2. Image acquisition time of 100 seconds with 25 micron pixels. LSU will build the X-ray imaging system, both hardware and software, test and evaluate, then move the system to the MIT neutron imaging beamline. The X-ray system will be two-fold: Mode 1 will operate as a Talbot-Lau interferometer based around a Sigray microarray X-ray source. Mode 2 will use a high-energy X-ray source. An X-ray beam path of nearly 3 meters is available. The 5-D data (XYZ, X-ray/neutron, time) is appropriate for the new Louisiana AI/ML supercomputer system. Efficient analysis of the 5-D requires collaborative virtual reality and uses the VR experience developed by LSU chemistry and psychology faculty. For the session: Neutron Imaging Beyond Traditional Radiography.</p> <p> </p> <br> <br> <p class="event_time">12:30 – 2:00 PM Lunch</p> <div class="pinkcallout"> <p class="session_title">Monday 16 January PLENARY: Neural Operators for Solving PDEs</p> <span class="chair">Session Chair: Robin Jenkin, NVIDIA Corporation (United States)<br> </span> <span class="session_time">2:00 PM – 3:00 PM</span> <br> <span class="room">Cyril Magnin I/II/III<br> </span> <span></span> <p class="session_notes">Deep learning surrogate models have shown promise in modeling complex physical phenomena such as fluid flows, molecular dynamics, and material properties. However, standard neural networks assume finite-dimensional inputs and outputs, and hence, cannot withstand a change in resolution or discretization between training and testing. We introduce Fourier neural operators that can learn operators, which are mappings between infinite dimensional spaces. They are independent of the resolution or grid of training data and allow for zero-shot generalization to higher resolution evaluations. When applied to weather forecasting, neural operators capture fine-scale phenomena and have similar skill as gold-standard numerical weather models for predictions up to a week or longer, while being 4-5 orders of magnitude faster.</p> <br> <span></span> <p class="session_notes"> </p> <span class="author_string"><strong>Anima Anandkumar, </strong>Bren professor, California Institute of Technology, and senior director of AI Research, NVIDIA Corporation (United States)<span class="author_string"></span></span> <p> </p> <span></span> <p class="session_notes">Anima Anandkumar is a Bren Professor at Caltech and Senior Director of AI Research at NVIDIA. 
<br> <span></span> <p class="session_notes"> </p> <span class="author_string"><strong>Anima Anandkumar, </strong>Bren professor, California Institute of Technology, and senior director of AI Research, NVIDIA Corporation (United States)<span class="author_string"></span></span> <p> </p> <span></span> <p class="session_notes">Anima Anandkumar is a Bren Professor at Caltech and Senior Director of AI Research at NVIDIA. She is passionate about designing principled AI algorithms and applying them to interdisciplinary domains. She has received several honors, such as the IEEE Fellowship, the Alfred P. Sloan Fellowship, the NSF CAREER Award, and Faculty Fellowships from Microsoft, Google, Facebook, and Adobe. She is part of the World Economic Forum's Expert Network. Anandkumar received her BTech from the Indian Institute of Technology Madras and her PhD from Cornell University, did postdoctoral research at MIT, and was an assistant professor at the University of California, Irvine.</p> </div> <br> <p class="event_time">3:00 – 3:30 PM Coffee Break</p> <div class="callout"> <p class="session_title">KEYNOTE: Computational Imaging using Fourier Ptychography and Phase Retrieval (M3)</p> <span class="chair">Session Chairs: Tony Allen, Purdue University (United States) and Andre Van Rynbach, U.S. Air Force (United States)<br> </span><span class="session_time">3:30 – 4:50 PM</span> <br> <span class="room">Market Street </span> <br> <span></span><br> <p class="presentation_time" style="text-align:left;">3:30<a name="COIMG-138"></a><span style="float: right;">COIMG-138</span> <br> <span class="presentation_title" final_id="COIMG-138" onclick="toggle_me()" style="cursor: pointer;">KEYNOTE: Computational phase imaging, </span><span class="author_string" final_id="COIMG-138" onclick="toggle_me()" style="cursor: pointer;">Laura Waller</span><span class="author_string" final_id="COIMG-138" onclick="toggle_me()" style="cursor: pointer;">, University of California, Berkeley (United States)</span><span class="abstract_link" final_id="COIMG-138" onclick="toggle_me()"> [view abstract] </span></p> <p class="session_notes">Laura Waller leads the Computational Imaging Lab, which develops new methods for optical imaging, with optics and computational algorithms designed jointly. She holds the Ted Van Duzer Endowed Professorship and is a Senior Fellow at the Berkeley Institute of Data Science (BIDS), with affiliations in Bioengineering and Applied Sciences & Technology. Laura was a Postdoctoral Researcher and Lecturer of Physics at Princeton University from 2010-2012 and received BS, MEng and PhD degrees from MIT in 2004, 2005 and 2010, respectively. She is a Moore Foundation Data-Driven Investigator, Bakar Fellow, Distinguished Graduate Student Mentoring awardee, NSF CAREER awardee, Chan-Zuckerberg Biohub Investigator, SPIE Early Career Achievement Awardee and Packard Fellow.</p> <p class="abstract" final_id="COIMG-138" id="abstract-COIMG-138" onclick="toggle_me()" style="display:none; cursor:pointer;">Scattering severely limits the visual acuity of an imaging system. This talk discusses how diversity in illumination wavelength can be utilized to circumvent the problem of phase randomization in scattered light fields. Amongst other applications, the introduced method allows for holographic measurements of hidden objects around corners and through scattering media, or for interferometric measurements of macroscopic objects with rough surfaces. This is possible as the technique interrogates the scene at two closely spaced optical wavelengths and computationally assembles a complex “synthetic field” at a “synthetic wave,” which is used for further processing. As the synthetic wavelength is the beat wavelength of the two optical wavelengths, it can be chosen orders of magnitude larger, and the computationally assembled synthetic field becomes immune to the deleterious effect of speckle. During the talk, different flavors of the technique will be introduced, using examples from our latest experimental results. For the session: Computational Imaging using Fourier Ptychography and Phase Retrieval.</p> <p> </p>
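<p class="session_notes">The scale of the synthetic wavelength is easy to check numerically. In the sketch below (illustrative values, not the speaker's parameters), the beat wavelength of two optical wavelengths lambda1 and lambda2 is lambda1*lambda2/|lambda1 - lambda2|, so two sources separated by half a nanometer yield a synthetic wave of roughly a millimeter and a half, far larger than typical surface roughness.</p> <pre>
# Synthetic (beat) wavelength of two closely spaced optical wavelengths.
# The two wavelengths below are illustrative values only.
lam1 = 854.0e-9   # first optical wavelength, meters
lam2 = 854.5e-9   # second optical wavelength, meters
synthetic = lam1 * lam2 / abs(lam1 - lam2)
print(f"synthetic wavelength = {synthetic * 1e3:.2f} mm")   # about 1.46 mm
</pre>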
<p class="presentation_time" style="text-align:left;">4:10<a name="COIMG-139"></a><span style="float: right;">COIMG-139</span> <br> <span class="presentation_title" final_id="COIMG-139" onclick="toggle_me()" style="cursor: pointer;">I can see clearly now: Sub-diffraction limit synthetic aperture lidar, </span><span class="author_string" final_id="COIMG-139" onclick="toggle_me()" style="cursor: pointer;">Tony G. Allen<sup>1,</sup><sup>2</sup>, </span><span class="author_string" final_id="COIMG-139" onclick="toggle_me()" style="cursor: pointer;">David J. Rabb<sup>2</sup>, </span><span class="author_string" final_id="COIMG-139" onclick="toggle_me()" style="cursor: pointer;">Gregery T. Buzzard<sup>1</sup>, and </span><span class="author_string" final_id="COIMG-139" onclick="toggle_me()" style="cursor: pointer;">Charles A. Bouman<sup>1</sup></span><span class="author_string" final_id="COIMG-139" onclick="toggle_me()" style="cursor: pointer;">; <sup>1</sup>Purdue University and <sup>2</sup>Air Force Research Laboratory (United States)</span><span class="abstract_link" final_id="COIMG-139" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="COIMG-139" id="abstract-COIMG-139" onclick="toggle_me()" style="display:none; cursor:pointer;">In synthetic aperture lidar (SAL), simultaneous aperture synthesis and pulse compression of a high bandwidth wave enables high-resolution three-dimensional (3D) imaging capabilities. Unfortunately, like other coherent imaging methods, SAL imagery is subject to noise and speckle. Many methods that aim to reduce these effects operate on the average of images reconstructed from sub-apertures, and hence come at the cost of resolution. Here, we present a model-based image reconstruction algorithm that uses multi-agent consensus equilibrium (MACE) as a framework for creating a joint reconstruction of the 3D image from data collected over the entire synthetic aperture. We show that this approach is able to reconstruct images with suppressed noise and speckle while preserving resolution beyond the diffraction limit. For the session: Computational Imaging using Fourier Ptychography and Phase Retrieval.</p> <p> </p>
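<p class="session_notes">The consensus structure of MACE can be sketched generically. The toy code below is illustrative only: the agents are placeholder proximal/denoising maps, not the authors' SAL forward model, but the loop is the standard MACE fixed-point iteration in which every agent's output is pulled toward a common consensus image.</p> <pre>
import numpy as np

def mace(agents, x0, rho=0.5, iters=100):
    """Generic multi-agent consensus equilibrium (MACE) iteration.
    agents: maps from images to images, e.g. proximal maps of per-
    sub-aperture data-fit terms plus a denoising prior agent.
    Returns a consensus image the agents approximately agree on."""
    N = len(agents)
    W = np.stack([x0.copy() for _ in range(N)])          # one state per agent
    for _ in range(iters):
        X = np.stack([F(w) for F, w in zip(agents, W)])  # apply each agent
        Z = 2.0 * X - W                                  # reflection (2F - I)
        W = (1.0 - rho) * W + rho * (2.0 * Z.mean(axis=0) - Z)  # (2G - I), Mann averaging
    return np.stack([F(w) for F, w in zip(agents, W)]).mean(axis=0)

# Placeholder agents: quadratic data-fit proximal maps toward two toy
# "measurements" plus a smoothing agent standing in for a prior model.
y1, y2 = np.ones(8), 2.0 * np.ones(8)
agents = [lambda w: (w + y1) / 2.0,
          lambda w: (w + y2) / 2.0,
          lambda w: np.convolve(w, [0.25, 0.5, 0.25], mode="same")]
x_hat = mace(agents, np.zeros(8))
</pre>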
<p class="presentation_time" style="text-align:left;">4:30<a name="COIMG-140"></a><span style="float: right;">COIMG-140</span> <br> <span class="presentation_title" final_id="COIMG-140" onclick="toggle_me()" style="cursor: pointer;">The role of phase retrieval for imaging and beam forming through turbulence, </span><span class="author_string" final_id="COIMG-140" onclick="toggle_me()" style="cursor: pointer;">Timothy J. Schulz<sup>1</sup> and </span><span class="author_string" final_id="COIMG-140" onclick="toggle_me()" style="cursor: pointer;">David J. Brady<sup>2</sup></span><span class="author_string" final_id="COIMG-140" onclick="toggle_me()" style="cursor: pointer;">; <sup>1</sup>Michigan Technological University and <sup>2</sup>The University of Arizona (United States)</span><span class="abstract_link" final_id="COIMG-140" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="COIMG-140" id="abstract-COIMG-140" onclick="toggle_me()" style="display:none; cursor:pointer;">In this presentation, we'll discuss the upper bounds on performance that phase retrieval can provide for imaging and beam-forming through a turbulent medium. We'll begin by examining performance limits when the medium is perfectly known—as in the presence of an oracle—which provide upper bounds on the performance of any system. Then we'll discuss the ways phase retrieval can characterize the medium, and show how those methods can advance performance toward the ultimate bounds. For the session: Computational Imaging using Fourier Ptychography and Phase Retrieval.</p> <p> </p> </div> <br> <br> <br> <span> </span> <div class="pinkcallout"> <p class="session_title">EI 2023 Highlights Session</p> <span class="chair">Session Chair: Robin Jenkin, NVIDIA Corporation (United States)<br> </span> <span class="session_time">3:30 – 5:00 PM</span><br> <span class="room">Cyril Magnin II<br> </span> <p class="session_notes">Join us for a session that celebrates the breadth of what EI has to offer with short papers selected from EI conferences. </p> <p class="session_notes">NOTE: The EI-wide "EI 2023 Highlights" session is concurrent with Monday afternoon COIMG, COLOR, IMAGE, and IQSP conference sessions. </p> <p> </p> <p class="presentation_time" style="text-align:left;"> <span>IQSP-309</span> <br> <span class="presentation_title" final_id="IQSP-309" onclick="toggle_me()" style="cursor: pointer;">Evaluation of image quality metrics designed for DRI tasks with automotive cameras, </span><span class="author_string" final_id="IQSP-309" onclick="toggle_me()" style="cursor: pointer;">Valentine Klein, </span><span class="author_string" final_id="IQSP-309" onclick="toggle_me()" style="cursor: pointer;">Yiqi LI, </span><span class="author_string" final_id="IQSP-309" onclick="toggle_me()" style="cursor: pointer;">Claudio Greco, </span><span class="author_string" final_id="IQSP-309" onclick="toggle_me()" style="cursor: pointer;">Laurent Chanas, and </span><span class="author_string" final_id="IQSP-309" onclick="toggle_me()" style="cursor: pointer;">Frédéric Guichard</span><span class="author_string" final_id="IQSP-309" onclick="toggle_me()" style="cursor: pointer;">, DXOMARK (France)</span><span class="abstract_link" final_id="IQSP-309" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="IQSP-309" id="abstract-IQSP-309" onclick="toggle_me()" style="display:none; cursor:pointer;">Driving assistance is increasingly used in new car models. Most driving assistance systems are based on automotive cameras and computer vision. Computer vision, regardless of the underlying algorithms and technology, requires the images to have good image quality, defined according to the task. This notion of good image quality is still to be defined in the case of computer vision, as it has very different criteria from human vision: humans have a better contrast detection ability than imaging chains.
The aim of this article is to compare three different metrics designed for detection of objects with computer vision: the Contrast Detection Probability (CDP) [1, 2, 3, 4], the Contrast Signal to Noise Ratio (CSNR) [5] and the Frequency of Correct Resolution (FCR) [6]. For this purpose, the computer vision task of reading the characters on a license plate will be used as a benchmark. The objective is to check the correlation between the objective metric and the ability of a neural network to perform this task. Thus, a protocol to test these metrics and compare them to the output of the neural network has been designed and the pros and cons of each of these three metrics have been noted.</p> <p> </p> <p class="presentation_time" style="text-align:left;"><span>SD&A-224</span> <br> <span class="presentation_title" final_id="SD&A-224" onclick="toggle_me()" style="cursor: pointer;">Human performance using stereo 3D in a helmet mounted display and association with individual stereo acuity, </span><span class="author_string" final_id="SD&A-224" onclick="toggle_me()" style="cursor: pointer;">Bonnie Posselt</span><span class="author_string" final_id="SD&A-224" onclick="toggle_me()" style="cursor: pointer;">, RAF Centre of Aviation Medicine (United Kingdom)</span><span class="abstract_link" final_id="SD&A-224" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="SD&A-224" id="abstract-SD&A-224" onclick="toggle_me()" style="display:none; cursor:pointer;">Binocular Helmet Mounted Displays (HMDs) are a critical part of the aircraft system, allowing information to be presented to the aviator with stereoscopic 3D (S3D) depth, potentially enhancing situational awareness and improving performance. The utility of S3D in an HMD may be linked to an individual’s ability to perceive changes in binocular disparity (stereo acuity). Though minimum stereo acuity standards exist for most military aviators, current test methods may be unable to characterise this relationship. This presentation will investigate the effect of S3D on performance when used in a warning alert displayed in an HMD. Furthermore, any effect on performance, ocular symptoms, and cognitive workload shall be evaluated in regard to individual stereo acuity measured with a variety of paper-based and digital stereo tests.</p> <p> </p> <p class="presentation_time" style="text-align:left;"> <span>IMAGE-281</span> <br> <span class="presentation_title" final_id="IMAGE-281" onclick="toggle_me()" style="cursor: pointer;">Smartphone-enabled point-of-care blood hemoglobin testing with color accuracy-assisted spectral learning, </span><span class="author_string" final_id="IMAGE-281" onclick="toggle_me()" style="cursor: pointer;">Sang Mok Park<sup>1</sup>, </span><span class="author_string" final_id="IMAGE-281" onclick="toggle_me()" style="cursor: pointer;">Yuhyun Ji<sup>1</sup>, </span><span class="author_string" final_id="IMAGE-281" onclick="toggle_me()" style="cursor: pointer;">Semin Kwon<sup>1</sup>, </span><span class="author_string" final_id="IMAGE-281" onclick="toggle_me()" style="cursor: pointer;">Andrew R. O’Brien<sup>2</sup>, </span><span class="author_string" final_id="IMAGE-281" onclick="toggle_me()" style="cursor: pointer;">Ying Wang<sup>2</sup>, and </span><span class="author_string" final_id="IMAGE-281" onclick="toggle_me()" style="cursor: pointer;">Young L. 
Kim<sup>1</sup></span><span class="author_string" final_id="IMAGE-281" onclick="toggle_me()" style="cursor: pointer;">; <sup>1</sup>Purdue University and <sup>2</sup>Indiana University School of Medicine (United States)</span><span class="abstract_link" final_id="IMAGE-281" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="IMAGE-281" id="abstract-IMAGE-281" onclick="toggle_me()" style="display:none; cursor:pointer;">We develop an mHealth technology for noninvasively measuring blood Hgb levels in patients with sickle cell anemia, using photos of peripheral tissue acquired by the built-in camera of a smartphone. As an easily accessible sensing site, the inner eyelid (i.e., palpebral conjunctiva) is used because of its relatively uniform microvasculature and the absence of skin pigments. Color correction (color reproduction) and spectral learning (spectral super-resolution spectroscopy) algorithms are integrated for accurate and precise mHealth blood Hgb testing. First, color correction using a color reference chart with multiple color patches extracts absolute color information of the inner eyelid, compensating for smartphone models, ambient light conditions, and data formats during photo acquisition. Second, spectral learning virtually transforms the smartphone camera into a hyperspectral imaging system, mathematically reconstructing high-resolution spectra from color-corrected eyelid images. Third, the color correction and spectral learning algorithms are combined with a spectroscopic model for blood Hgb quantification among sickle cell patients. Importantly, single-shot photo acquisition of the inner eyelid using the color reference chart allows straightforward, real-time, and instantaneous reading of blood Hgb levels. Overall, our mHealth blood Hgb tests could potentially be scalable, robust, and sustainable in resource-limited and homecare settings.</p> <p> </p>
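<p class="session_notes">The "spectral learning" step above can be illustrated in its simplest linear form: learn a mapping from color-corrected camera responses to high-resolution spectra using training pairs. The sketch below uses synthetic placeholder data and plain ridge regression; the model in the paper is more sophisticated.</p> <pre>
import numpy as np

# Toy spectral super-resolution: recover a B-band spectrum from 3 color
# channels via ridge regression on (spectrum, RGB) training pairs.
rng = np.random.default_rng(1)
B = 31                                  # spectral bands, e.g. 400-700 nm in 10 nm steps
S_train = rng.random((500, B))          # training spectra (placeholder data)
C = rng.random((B, 3))                  # camera spectral response (placeholder)
RGB_train = S_train @ C                 # color-corrected camera responses

lam = 1e-3                              # ridge regularization strength
A = RGB_train.T @ RGB_train + lam * np.eye(3)
M = np.linalg.solve(A, RGB_train.T @ S_train)    # learned 3-by-B mapping

spectrum = np.array([[0.4, 0.3, 0.2]]) @ M       # reconstructed B-band spectrum
</pre>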
<p class="presentation_time" style="text-align:left;"> <span>AVM-118</span> <br> <span class="presentation_title" final_id="AVM-118" onclick="toggle_me()" style="cursor: pointer;">Designing scenes to quantify the performance of automotive perception systems, </span><span class="author_string" final_id="AVM-118" onclick="toggle_me()" style="cursor: pointer;">Zhenyi Liu<sup>1</sup>, </span><span class="author_string" final_id="AVM-118" onclick="toggle_me()" style="cursor: pointer;">Devesh Shah<sup>2</sup>, </span><span class="author_string" final_id="AVM-118" onclick="toggle_me()" style="cursor: pointer;">Alireza Rahimpour<sup>2</sup>, </span><span class="author_string" final_id="AVM-118" onclick="toggle_me()" style="cursor: pointer;">Joyce Farrell<sup>1</sup>, and </span><span class="author_string" final_id="AVM-118" onclick="toggle_me()" style="cursor: pointer;">Brian Wandell<sup>1</sup></span><span class="author_string" final_id="AVM-118" onclick="toggle_me()" style="cursor: pointer;">; <sup>1</sup>Stanford University and <sup>2</sup>Ford Motor Company (United States)</span><span class="abstract_link" final_id="AVM-118" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="AVM-118" id="abstract-AVM-118" onclick="toggle_me()" style="display:none; cursor:pointer;">We implemented an end-to-end simulation of the camera-based perception systems used in automotive applications. The open-source software creates complex driving scenes and simulates cameras that acquire images of these scenes. The camera images are then used by a neural network in the perception system to identify the locations of scene objects, providing the results as input to the decision system. In this paper, we design collections of test scenes that can be used to quantify the perception system’s performance under a range of (a) environmental conditions (object distance, occlusion ratio, lighting levels), and (b) camera parameters (pixel size, lens type, color filter array). We are designing these scene collections to analyze detection performance for vehicles, traffic signs, and vulnerable road users across those environmental conditions and camera parameters. With experience, such scene collections may serve a role similar to that of standardized test targets that are used to quantify camera image quality (e.g., acuity, color).</p> <p> </p> <p class="presentation_time" style="text-align:left;"> <span>VDA-403</span> <br> <span class="presentation_title" final_id="VDA-403" onclick="toggle_me()" style="cursor: pointer;">Visualizing and monitoring the process of injection molding, </span><span class="author_string" final_id="VDA-403" onclick="toggle_me()" style="cursor: pointer;">Christian A. Steinparz<sup>1</sup>, </span><span class="author_string" final_id="VDA-403" onclick="toggle_me()" style="cursor: pointer;">Thomas Mitterlehner<sup>2</sup>, </span><span class="author_string" final_id="VDA-403" onclick="toggle_me()" style="cursor: pointer;">Bernhard Praher<sup>2</sup>, </span><span class="author_string" final_id="VDA-403" onclick="toggle_me()" style="cursor: pointer;">Klaus Straka<sup>1,</sup><sup>2</sup>, </span><span class="author_string" final_id="VDA-403" onclick="toggle_me()" style="cursor: pointer;">Holger Stitz<sup>1,</sup><sup>3</sup>, and </span><span class="author_string" final_id="VDA-403" onclick="toggle_me()" style="cursor: pointer;">Marc Streit<sup>1,</sup><sup>3</sup></span><span class="author_string" final_id="VDA-403" onclick="toggle_me()" style="cursor: pointer;">; <sup>1</sup>Johannes Kepler University, <sup>2</sup>Moldsonics GmbH, and <sup>3</sup>datavisyn GmbH (Austria)</span><span class="abstract_link" final_id="VDA-403" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="VDA-403" id="abstract-VDA-403" onclick="toggle_me()" style="display:none; cursor:pointer;">In injection molding machines, the molds are rarely equipped with sensor systems. The availability of non-invasive ultrasound-based in-mold sensors provides better means for guiding operators of injection molding machines throughout the production process. However, existing visualizations are mostly limited to plots of temperature and pressure over time. In this work, we present the result of a design study created in collaboration with domain experts. The resulting prototypical application uses real-world data taken from live ultrasound sensor measurements for injection molding cavities captured over multiple cycles during the injection process. Our contribution includes a definition of tasks for setting up and monitoring the machines during the process, and the corresponding web-based visual analysis tool addressing these tasks.
The interface consists of a multi-view display with various levels of data aggregation that is updated live for newly streamed data of ongoing injection cycles.</p> <p> </p> <p class="presentation_time" style="text-align:left;"> <span>COIMG-155</span> <br> <span class="presentation_title" final_id="COIMG-155" onclick="toggle_me()" style="cursor: pointer;">Commissioning the James Webb Space Telescope, </span><span class="author_string" final_id="COIMG-155" onclick="toggle_me()" style="cursor: pointer;">Joseph M. Howard</span><span class="author_string" final_id="COIMG-155" onclick="toggle_me()" style="cursor: pointer;">, NASA Goddard Space Flight Center (United States)</span><span class="abstract_link" final_id="COIMG-155" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="COIMG-155" id="abstract-COIMG-155" onclick="toggle_me()" style="display:none; cursor:pointer;">Astronomy is arguably in a golden age, where current and future NASA space telescopes are expected to contribute to this rapid growth in understanding of our universe. The most recent addition to our space-based telescopes dedicated to astronomy and astrophysics is the James Webb Space Telescope (JWST), which launched on 25 December 2021. This talk will discuss the first six months in space for JWST, which were spent commissioning the observatory with many deployments, alignments, and system and instrumentation checks. These engineering activities help verify the proper working of the telescope prior to commencing full science operations. For the session: Computational Imaging using Fourier Ptychography and Phase Retrieval.</p> <p> </p> <p class="presentation_time" style="text-align:left;"> <span>HVEI-223</span> <br> <span class="presentation_title" final_id="HVEI-223" onclick="toggle_me()" style="cursor: pointer;">Critical flicker frequency (CFF) at high luminance levels, </span><span class="author_string" final_id="HVEI-223" onclick="toggle_me()" style="cursor: pointer;">Alexandre Chapiro<sup>1</sup>, </span><span class="author_string" final_id="HVEI-223" onclick="toggle_me()" style="cursor: pointer;">Nathan Matsuda<sup>1</sup>, </span><span class="author_string" final_id="HVEI-223" onclick="toggle_me()" style="cursor: pointer;">Maliha Ashraf<sup>2</sup>, and </span><span class="author_string" final_id="HVEI-223" onclick="toggle_me()" style="cursor: pointer;">Rafal Mantiuk<sup>3</sup></span><span class="author_string" final_id="HVEI-223" onclick="toggle_me()" style="cursor: pointer;">; <sup>1</sup>Meta (United States), <sup>2</sup>University of Liverpool (United Kingdom), and <sup>3</sup>University of Cambridge (United Kingdom)</span><span class="abstract_link" final_id="HVEI-223" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="HVEI-223" id="abstract-HVEI-223" onclick="toggle_me()" style="display:none; cursor:pointer;">The critical flicker fusion (CFF) is the frequency of changes at which a temporally periodic light will begin to appear completely steady to an observer. This value is affected by several visual factors, such as the luminance of the stimulus or its location on the retina. With new high dynamic range (HDR) displays, operating at higher luminance levels, and virtual reality (VR) displays, presenting at wide fields-of-view, the effective CFF may change significantly from values expected for traditional presentation. 
In this work, we use a prototype HDR VR display capable of luminances up to 20,000 cd/m^2 to gather a novel set of CFF measurements at previously unexamined levels of luminance, eccentricity, and size. Our data are useful for studying the temporal behavior of the visual system at high luminance levels, as well as for setting useful thresholds for display engineering.</p> <p> </p>
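<p class="session_notes">For orientation, the classical Ferry-Porter law predicts that CFF rises roughly linearly with the logarithm of luminance, which is why measurements at much higher luminances matter for display engineering. The sketch below uses illustrative coefficients, not the paper's fitted values.</p> <pre>
import numpy as np

# Ferry-Porter law: CFF grows approximately linearly in log10(luminance).
# The coefficients a and b are illustrative placeholders, not fits from
# this paper; real values depend on eccentricity and stimulus size.
def cff_hz(luminance_cd_m2, a=37.0, b=9.0):
    return a + b * np.log10(luminance_cd_m2)

for L in (100.0, 1000.0, 20000.0):     # up to the prototype's 20,000 cd/m^2
    print(f"{L:8.0f} cd/m^2  ->  {cff_hz(L):.1f} Hz")
</pre>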
<p class="presentation_time" style="text-align:left;"> <span>HPCI-228</span> <br> <span class="presentation_title" final_id="HPCI-228" onclick="toggle_me()" style="cursor: pointer;">Physics guided machine learning for image-based material decomposition of tissues from simulated breast models with calcifications, </span><span class="author_string" final_id="HPCI-228" onclick="toggle_me()" style="cursor: pointer;">Muralikrishnan Gopalakrishnan Meena<sup>1</sup>, </span><span class="author_string" final_id="HPCI-228" onclick="toggle_me()" style="cursor: pointer;">Amir K. Ziabari<sup>1</sup>, </span><span class="author_string" final_id="HPCI-228" onclick="toggle_me()" style="cursor: pointer;">Singanallur Venkatakrishnan<sup>1</sup>, </span><span class="author_string" final_id="HPCI-228" onclick="toggle_me()" style="cursor: pointer;">Isaac R. Lyngaas<sup>1</sup>, </span><span class="author_string" final_id="HPCI-228" onclick="toggle_me()" style="cursor: pointer;">Matthew R. Norman<sup>1</sup>, </span><span class="author_string" final_id="HPCI-228" onclick="toggle_me()" style="cursor: pointer;">Balint Joo<sup>1</sup>, </span><span class="author_string" final_id="HPCI-228" onclick="toggle_me()" style="cursor: pointer;">Thomas L. Beck<sup>1</sup>, </span><span class="author_string" final_id="HPCI-228" onclick="toggle_me()" style="cursor: pointer;">Charles A. Bouman<sup>2</sup>, </span><span class="author_string" final_id="HPCI-228" onclick="toggle_me()" style="cursor: pointer;">Anuj Kapadia<sup>1</sup>, and </span><span class="author_string" final_id="HPCI-228" onclick="toggle_me()" style="cursor: pointer;">Xiao Wang<sup>1</sup></span><span class="author_string" final_id="HPCI-228" onclick="toggle_me()" style="cursor: pointer;">; <sup>1</sup>Oak Ridge National Laboratory and <sup>2</sup>Purdue University (United States)</span><span class="abstract_link" final_id="HPCI-228" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="HPCI-228" id="abstract-HPCI-228" onclick="toggle_me()" style="display:none; cursor:pointer;">Material decomposition of Computed Tomography (CT) scans using projection-based approaches, while highly accurate, poses a challenge for medical imaging researchers and clinicians due to limited or no access to projection data. We introduce a deep learning image-based material decomposition method guided by physics and requiring no access to projection data. The method is demonstrated to decompose tissues from simulated dual-energy X-ray CT scans of virtual human phantoms containing four materials - adipose, fibroglandular, calcification, and air. The method uses a hybrid unsupervised and supervised learning technique to tackle the material decomposition problem. We take advantage of the unique X-ray absorption rate of calcium compared to body tissues to perform a preliminary segmentation of calcification from the images using unsupervised learning. We then perform supervised material decomposition using a deep-learned UNET model, which is trained using GPUs on the high-performance systems at the Oak Ridge Leadership Computing Facility. The method is demonstrated on simulated breast models to decompose calcification, adipose, fibroglandular, and air.</p> <p> </p> <p class="presentation_time" style="text-align:left;"> <span>3DIA-104</span> <br> <span class="presentation_title" final_id="3DIA-104" onclick="toggle_me()" style="cursor: pointer;">Layered view synthesis for general images, </span><span class="author_string" final_id="3DIA-104" onclick="toggle_me()" style="cursor: pointer;">Loïc Dehan, </span><span class="author_string" final_id="3DIA-104" onclick="toggle_me()" style="cursor: pointer;">Wiebe Van Ranst, and </span><span class="author_string" final_id="3DIA-104" onclick="toggle_me()" style="cursor: pointer;">Patrick Vandewalle</span><span class="author_string" final_id="3DIA-104" onclick="toggle_me()" style="cursor: pointer;">, Katholieke Universiteit Leuven (Belgium)</span><span class="abstract_link" final_id="3DIA-104" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="3DIA-104" id="abstract-3DIA-104" onclick="toggle_me()" style="display:none; cursor:pointer;">We describe a novel method for monocular view synthesis. The goal of our work is to create a visually pleasing set of horizontally spaced views based on a single image. This can be applied in view synthesis for virtual reality and glasses-free 3D displays. Previous methods produce realistic results on images that show a clear distinction between a foreground object and the background. We aim to create novel views in more general, crowded scenes in which there is no clear distinction. Our main contribution is a computationally efficient method for realistic occlusion inpainting and blending, especially in complex scenes. Our method can be effectively applied to any image, which is shown both qualitatively and quantitatively on a large dataset of stereo images. Our method performs natural disocclusion inpainting and maintains the shape and edge quality of foreground objects.</p> <p> </p> <p class="presentation_time" style="text-align:left;"> <span>ISS-329</span> <br> <span class="presentation_title" final_id="ISS-329" onclick="toggle_me()" style="cursor: pointer;">A self-powered asynchronous image sensor with independent in-pixel harvesting and sensing operations, </span><span class="author_string" final_id="ISS-329" onclick="toggle_me()" style="cursor: pointer;">Ruben Gomez-Merchan, </span><span class="author_string" final_id="ISS-329" onclick="toggle_me()" style="cursor: pointer;">Juan Antonio Leñero-Bardallo, and </span><span class="author_string" final_id="ISS-329" onclick="toggle_me()" style="cursor: pointer;">Ángel Rodríguez-Vázquez</span><span class="author_string" final_id="ISS-329" onclick="toggle_me()" style="cursor: pointer;">, University of Seville (Spain)</span><span class="abstract_link" final_id="ISS-329" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="ISS-329" id="abstract-ISS-329" onclick="toggle_me()" style="display:none; cursor:pointer;">A new self-powered asynchronous sensor with a novel pixel architecture is presented. Pixels are autonomous and can harvest or sense energy independently. During image acquisition, pixels toggle to a harvesting operation mode once they have sensed their local illumination level. With the proposed pixel architecture, the most illuminated pixels provide an early contribution to power the sensor, while dimly illuminated ones spend more time sensing their local illumination.
Thus, the equivalent frame rate is higher than that offered by conventional self-powered sensors, which harvest and sense illumination in independent phases. The proposed sensor uses a Time-to-First-Spike readout that allows trading off image quality against data and bandwidth consumption. The sensor has HDR operation with a dynamic range of 80 dB. Pixel power consumption is only 70 pW. In the article, we describe the sensor’s and pixel’s architectures in detail. Experimental results are provided and discussed. Sensor specifications are benchmarked against the state of the art.</p> <p> </p>
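<p class="session_notes">The Time-to-First-Spike readout mentioned above has a simple schematic model: each pixel fires once its integrated photocurrent reaches a threshold, so bright pixels fire early and intensity can be recovered from the spike time. The sketch below is illustrative only and ignores the harvesting mode and circuit non-idealities.</p> <pre>
import numpy as np

# Schematic time-to-first-spike (TTFS) readout: a pixel spikes when its
# integrated photocurrent reaches a fixed threshold charge, so the spike
# time satisfies intensity * t_spike = q_threshold.
rng = np.random.default_rng(2)
intensity = rng.uniform(0.1, 1.0, size=(4, 4))   # true illumination (arbitrary units)
q_threshold = 1.0
t_spike = q_threshold / intensity                # brighter pixels fire first

# Stopping the readout early trades image quality for bandwidth: pixels
# that have not fired by t_max are clipped to the dimmest coded level.
t_max = 5.0
estimate = q_threshold / np.minimum(t_spike, t_max)
</pre>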
<p class="presentation_time" style="text-align:left;"><span>COLOR-184</span> <br> <span class="presentation_title" final_id="COLOR-184" onclick="toggle_me()" style="cursor: pointer;">Color blindness and modern board games, </span><span class="author_string" final_id="COLOR-184" onclick="toggle_me()" style="cursor: pointer;">Alessandro Rizzi<sup>1</sup> and </span><span class="author_string" final_id="COLOR-184" onclick="toggle_me()" style="cursor: pointer;">Matteo Sassi<sup>2</sup></span><span class="author_string" final_id="COLOR-184" onclick="toggle_me()" style="cursor: pointer;">; <sup>1</sup>Università degli Studi di Milano and <sup>2</sup>consultant (Italy)</span><span class="abstract_link" final_id="COLOR-184" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="COLOR-184" id="abstract-COLOR-184" onclick="toggle_me()" style="display:none; cursor:pointer;">The board game industry is experiencing a strong renewal of interest. In the last few years, about 4000 new board games have been designed and distributed each year. The gender balance among board game players is approaching parity, though the male component remains a slight majority. This means that (at least) around 10% of board game players are color blind. How does the board game industry deal with this? Awareness has recently begun to rise in board game design, but so far there is a big gap compared with, for example, the computer game industry. This paper presents some data about the current situation, discussing exemplary cases of successful board games.</p> <p> </p> <script> function toggle_me() { var elm = event.target || event.srcElement; // element whose [view abstract] link or title was clicked var final_id = elm.getAttribute("final_id"); // paper ID, e.g. "COIMG-141" var x = document.getElementById("abstract-" + final_id); x.style.display = (x.style.display === "none") ? "block" : "none"; // show or hide the abstract }</script> </div> <br> <span> </span> <p class="event_time">5:00 – 6:15 PM EI 2023 All-Conference Welcome Reception (in the Cyril Magnin Foyer)</p> <p class="date">Tuesday 17 January 2023</p> <p class="session_title">Neutron Imaging Beyond Traditional Radiography (T1)</p> <span class="chair_label">Session Chairs: </span> <span class="chair">Alexander Long, Los Alamos National Laboratory (United States) and Sven Vogel, Los Alamos National Laboratory (United States)<br> </span> <span class="session_time">8:50 – 10:10 AM</span> <br> <span class="room">Market Street<br> </span> <p class="presentation_time" style="text-align:left;">8:50<a name="COIMG-141"></a><span style="float: right;">COIMG-141</span> <br> <span class="presentation_title" final_id="COIMG-141" onclick="toggle_me()" style="cursor: pointer;">Neutron Bragg-edge/dip imaging with least squares method and machine learning, </span><span class="author_string" final_id="COIMG-141" onclick="toggle_me()" style="cursor: pointer;">Hirotaka Sato</span><span class="author_string" final_id="COIMG-141" onclick="toggle_me()" style="cursor: pointer;">, Hokkaido University (Japan)</span><span class="abstract_link" final_id="COIMG-141" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="COIMG-141" id="abstract-COIMG-141" onclick="toggle_me()" style="display:none; cursor:pointer;">Neutron Bragg-edge and Bragg-dip imaging methods, which are energy-resolved neutron transmission imaging methods, are attractive imaging technologies that can non-destructively/non-invasively visualize bulk information about crystalline microstructure in polycrystalline materials, oligocrystals and single crystals. The visualization area ranges from several centimeters square to approximately 10 cm square. The spatial resolution is sub-millimeter to several millimeters. Neutrons can easily penetrate a material several centimeters thick. Crystallographic analysis with such spatial discrimination can therefore be carried out by this method. The method has been developing recently owing to neutron time-of-flight (TOF) imaging detectors at particle accelerator-driven pulsed white neutron sources, and to optical devices for neutron energy selection at nuclear reactor-based steady-state white neutron sources. For quantitative analysis of crystallographic information, the information must be extracted from the energy-dependent neutron transmission data (the neutron transmission spectrum) measured at each pixel of a neutron imaging detector. Therefore, various least squares data analysis methods have been developed for extracting information on crystalline phase fractions, crystal orientations, grain/crystallite sizes and macro/micro-strains. On the other hand, completely performing such data analyses at all pixels of the imaging detector is laborious. Thus, several machine learning-assisted data analysis methods have also been developed. As a result, various materials engineering, industrial, and cultural heritage studies using neutron Bragg-edge/dip imaging have been successfully carried out.
In this presentation, recent progress in neutron Bragg-edge/dip imaging with the least squares method and machine learning, together with its applications, is reported. For the session: Neutron Imaging Beyond Traditional Radiography.</p> <p> </p> <p class="presentation_time" style="text-align:left;">9:10<a name="COIMG-143"></a><span style="float: right;">COIMG-143</span> <br> <span class="presentation_title" final_id="COIMG-143" onclick="toggle_me()" style="cursor: pointer;">Event mode data collection for neutron imaging applications, </span><span class="author_string" final_id="COIMG-143" onclick="toggle_me()" style="cursor: pointer;">Adrian Losko<sup>1</sup>, </span><span class="author_string" final_id="COIMG-143" onclick="toggle_me()" style="cursor: pointer;">Jason Gochanour<sup>2</sup>, </span><span class="author_string" final_id="COIMG-143" onclick="toggle_me()" style="cursor: pointer;">Alex Gustschin<sup>1</sup>, </span><span class="author_string" final_id="COIMG-143" onclick="toggle_me()" style="cursor: pointer;">Yiyong Han<sup>1</sup>, </span><span class="author_string" final_id="COIMG-143" onclick="toggle_me()" style="cursor: pointer;">Alexander M. Long<sup>2</sup>, </span><span class="author_string" final_id="COIMG-143" onclick="toggle_me()" style="cursor: pointer;">Manuel Morgano<sup>3</sup>, </span><span class="author_string" final_id="COIMG-143" onclick="toggle_me()" style="cursor: pointer;">Michael Schulz<sup>1</sup>, </span><span class="author_string" final_id="COIMG-143" onclick="toggle_me()" style="cursor: pointer;">Anton Tremsin<sup>4</sup>, </span><span class="author_string" final_id="COIMG-143" onclick="toggle_me()" style="cursor: pointer;">Sven C. Vogel<sup>2</sup>, and </span><span class="author_string" final_id="COIMG-143" onclick="toggle_me()" style="cursor: pointer;">Alexander Wolfertz<sup>1</sup></span><span class="author_string" final_id="COIMG-143" onclick="toggle_me()" style="cursor: pointer;">; <sup>1</sup>Technical University Munich (Germany), <sup>2</sup>Los Alamos National Laboratory (United States), <sup>3</sup>European Spallation Source (Sweden), and <sup>4</sup>University of California, Berkeley (United States)</span><span class="abstract_link" final_id="COIMG-143" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="COIMG-143" id="abstract-COIMG-143" onclick="toggle_me()" style="display:none; cursor:pointer;">With the transformative development of event-based detectors, new perspectives for detection systems for various types of radiation have opened up. A recently developed event-driven imaging system based on Timepix3 sensor technology is capable of observing and time-stamping the optical signal induced by particle interactions in scintillator materials with nanosecond temporal and micrometer spatial resolution, providing a pathway to fuse the benefits of integrating-type camera detectors with counting-type detectors. In this approach, reconstructing the interaction position of a neutron with the scintillator to sub-pixel accuracy provides a precise determination of the location, as well as the time-of-arrival, of individual neutrons. Utilizing this principle, it was shown that spatial and temporal resolution can be improved beyond the classical limits of “regular” neutron imaging.
Additionally, a significant increase in signal-to-noise ratio was achieved using the unique potential of event-mode detection to discriminate gamma background from the neutron signal based on the spatiotemporal signature of single neutron events produced in the scintillator. Such sparse data collection offers many advantages over regular imaging systems, and existing image processing routines must be redesigned to benefit from this new approach. Here, we present recent progress in this development, with applications to white-beam and time-of-flight neutron imaging. For the session: Neutron Imaging Beyond Traditional Radiography.</p> <p> </p> <p class="presentation_time" style="text-align:left;">9:30<a name="COIMG-144"></a><span style="float: right;">COIMG-144</span> <br> <span class="presentation_title" final_id="COIMG-144" onclick="toggle_me()" style="cursor: pointer;">Recent developments on diffraction-based and polarized neutron imaging modalities, </span><span class="author_string" final_id="COIMG-144" onclick="toggle_me()" style="cursor: pointer;">Søren Schmidt<sup>1</sup>, </span><span class="author_string" final_id="COIMG-144" onclick="toggle_me()" style="cursor: pointer;">Patrick Tung<sup>2</sup>, </span><span class="author_string" final_id="COIMG-144" onclick="toggle_me()" style="cursor: pointer;">Stavros Samothrakitis<sup>3</sup>, </span><span class="author_string" final_id="COIMG-144" onclick="toggle_me()" style="cursor: pointer;">Camilla B. Larsen<sup>3</sup>, </span><span class="author_string" final_id="COIMG-144" onclick="toggle_me()" style="cursor: pointer;">Markus Strobl<sup>3</sup>, </span><span class="author_string" final_id="COIMG-144" onclick="toggle_me()" style="cursor: pointer;">Luise T. Kuhn<sup>4</sup>, </span><span class="author_string" final_id="COIMG-144" onclick="toggle_me()" style="cursor: pointer;">Ryoji Kiyanagi<sup>5</sup>, and </span><span class="author_string" final_id="COIMG-144" onclick="toggle_me()" style="cursor: pointer;">Takenao Shinohara<sup>5</sup></span><span class="author_string" final_id="COIMG-144" onclick="toggle_me()" style="cursor: pointer;">; <sup>1</sup>European Spallation Source ERIC (Sweden), <sup>2</sup>University of New South Wales (Australia), <sup>3</sup>Paul Scherrer Institute (Switzerland), <sup>4</sup>Technical University of Denmark (Denmark), and <sup>5</sup>Japan Proton Accelerator Research Complex (J-PARC) Center (Japan)</span><span class="abstract_link" final_id="COIMG-144" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="COIMG-144" id="abstract-COIMG-144" onclick="toggle_me()" style="display:none; cursor:pointer;">Various modalities of diffraction-based neutron imaging for spatially resolved crystalline microstructures and of polarized neutron imaging for measuring 3D magnetic fields have emerged recently. The talk will briefly outline a few methodologies along with examples.
For the session: Neutron Imaging Beyond Traditional Radiography.</p> <p> </p> <br> <br> <p class="event_time">10:00 AM – 7:30 PM Industry Exhibition - Tuesday (in the Cyril Magnin Foyer)</p> <p class="event_time">10:20 – 10:50 AM Coffee Break</p> <p class="session_title">Neutron Imaging Beyond Traditional Radiography (T2)</p> <span class="chair_label">Session Chairs: </span> <span class="chair">Alexander Long, Los Alamos National Laboratory (United States) and Sven Vogel, Los Alamos National Laboratory (United States)<br> </span> <span class="session_time">10:50 – 11:50 AM</span> <br> <span class="room">Market Street<br> </span> <p class="presentation_time" style="text-align:left;">10:50<a name="COIMG-145"></a><span style="float: right;">COIMG-145</span> <br> <span class="presentation_title" final_id="COIMG-145" onclick="toggle_me()" style="cursor: pointer;">Strain tomography using neutrons, </span><span class="author_string" final_id="COIMG-145" onclick="toggle_me()" style="cursor: pointer;">Christopher M. Wensrich<sup>1</sup>, </span><span class="author_string" final_id="COIMG-145" onclick="toggle_me()" style="cursor: pointer;">Alexander W. Gregg<sup>1</sup>, </span><span class="author_string" final_id="COIMG-145" onclick="toggle_me()" style="cursor: pointer;">Johannes N. Hendriks<sup>1</sup>, </span><span class="author_string" final_id="COIMG-145" onclick="toggle_me()" style="cursor: pointer;">Anton Tremsin<sup>2</sup>, </span><span class="author_string" final_id="COIMG-145" onclick="toggle_me()" style="cursor: pointer;">Adrian Wills<sup>1</sup>, </span><span class="author_string" final_id="COIMG-145" onclick="toggle_me()" style="cursor: pointer;">Takenao Shinohara<sup>3</sup>, </span><span class="author_string" final_id="COIMG-145" onclick="toggle_me()" style="cursor: pointer;">Oliver Kirstein<sup>1</sup>, </span><span class="author_string" final_id="COIMG-145" onclick="toggle_me()" style="cursor: pointer;">Vladimir Luzin<sup>1</sup>, and </span><span class="author_string" final_id="COIMG-145" onclick="toggle_me()" style="cursor: pointer;">Erich H. Kisi<sup>1</sup></span><span class="author_string" final_id="COIMG-145" onclick="toggle_me()" style="cursor: pointer;">; <sup>1</sup>University of Newcastle (Australia), <sup>2</sup>University of California, Berkeley (United States), and <sup>3</sup>Japan Proton Accelerator Research Complex (J-PARC) Center (Japan)</span><span class="abstract_link" final_id="COIMG-145" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="COIMG-145" id="abstract-COIMG-145" onclick="toggle_me()" style="display:none; cursor:pointer;">Bragg-edge strain imaging presents a natural rich-tomography problem focused on the reconstruction of strain distributions within polycrystalline solids. This problem has been the focus of some attention over the past decade or more following several successful demonstrations in axisymmetric systems. The extension of this work to general two and three-dimensional systems was hampered by the fact that the relevant inverse-problem was inherently ill-posed. This presentation provides an overview of a new approach to this problem based on Gaussian Process regression that involves the application of known constraints such as equilibrium and boundary conditions to the tomographic reconstruction of strain fields. Experimental demonstrations of the technique in two and three dimensions are provided. 
The latter involved the reconstruction of a 3D strain field within a small steel-aluminum ‘cube-and-plug’ sample from a set of 70 Bragg-edge strain images measured using the RADEN instrument at J-PARC in Japan, in conjunction with an MCP/Timepix detector. Comparisons are made to traditional point-wise strain scans from the KOWARI instrument at the Australian Centre for Neutron Scattering and to a finite element model. Opportunities and challenges for the wider application of the technique will also be discussed. For the session: Neutron Imaging Beyond Traditional Radiography.</p> <p> </p>
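<p class="session_notes">The role of Gaussian process regression here can be sketched in one dimension: each Bragg-edge measurement is an average of the strain field along a ray, i.e. a linear functional of the field, so a Gaussian process prior yields a closed-form posterior. The toy code below uses a placeholder kernel and interval "rays" and omits the equilibrium and boundary-condition constraints that are central to the actual method.</p> <pre>
import numpy as np

# 1D toy of strain tomography with a Gaussian process prior: measurements
# y_i are averages of the field f over intervals ("rays"), y = A f + noise.
n = 200
x = np.linspace(0.0, 1.0, n)
K = np.exp(-0.5 * (x[:, None] - x[None, :]) ** 2 / 0.05 ** 2)   # RBF prior covariance

rays = [(0.0, 0.4), (0.2, 0.7), (0.5, 1.0), (0.1, 0.9)]          # interval endpoints
A = np.zeros((len(rays), n))
for i, (a, b) in enumerate(rays):
    mask = (x >= a) & (b >= x)
    A[i, mask] = 1.0 / mask.sum()        # row i averages f over the interval

f_true = np.sin(3.0 * x)                 # hypothetical strain profile
y = A @ f_true + 1e-3 * np.random.default_rng(3).standard_normal(len(rays))

S = A @ K @ A.T + 1e-6 * np.eye(len(rays))
f_post = K @ A.T @ np.linalg.solve(S, y) # posterior mean of the strain field
</pre>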
<p class="presentation_time" style="text-align:left;">11:10<a name="COIMG-146"></a><span style="float: right;">COIMG-146</span> <br> <span class="presentation_title" final_id="COIMG-146" onclick="toggle_me()" style="cursor: pointer;">Data processing for non-destructive studies of material properties through energy resolved neutron imaging, </span><span class="author_string" final_id="COIMG-146" onclick="toggle_me()" style="cursor: pointer;">Anton Tremsin<sup>1</sup>, </span><span class="author_string" final_id="COIMG-146" onclick="toggle_me()" style="cursor: pointer;">Winfried Kockelmann<sup>2</sup>, </span><span class="author_string" final_id="COIMG-146" onclick="toggle_me()" style="cursor: pointer;">Daniel Pooley<sup>2</sup>, </span><span class="author_string" final_id="COIMG-146" onclick="toggle_me()" style="cursor: pointer;">Saurabh Kabra<sup>2</sup>, </span><span class="author_string" final_id="COIMG-146" onclick="toggle_me()" style="cursor: pointer;">Takenao Shinohara<sup>3</sup>, </span><span class="author_string" final_id="COIMG-146" onclick="toggle_me()" style="cursor: pointer;">Kenichi Oikawa<sup>3</sup>, </span><span class="author_string" final_id="COIMG-146" onclick="toggle_me()" style="cursor: pointer;">Hassina Z. Bilheux<sup>4</sup>, </span><span class="author_string" final_id="COIMG-146" onclick="toggle_me()" style="cursor: pointer;">Jean-Christophe Bilheux<sup>4</sup>, </span><span class="author_string" final_id="COIMG-146" onclick="toggle_me()" style="cursor: pointer;">Adrian Losko<sup>5</sup>, </span><span class="author_string" final_id="COIMG-146" onclick="toggle_me()" style="cursor: pointer;">Sven C. Vogel<sup>6</sup>, </span><span class="author_string" final_id="COIMG-146" onclick="toggle_me()" style="cursor: pointer;">Alexander M. Long<sup>6</sup>, </span><span class="author_string" final_id="COIMG-146" onclick="toggle_me()" style="cursor: pointer;">John Rakovan<sup>7</sup>, </span><span class="author_string" final_id="COIMG-146" onclick="toggle_me()" style="cursor: pointer;">Christopher M. Wensrich<sup>8</sup>, </span><span class="author_string" final_id="COIMG-146" onclick="toggle_me()" style="cursor: pointer;">Florencia Malamud<sup>9</sup>, </span><span class="author_string" final_id="COIMG-146" onclick="toggle_me()" style="cursor: pointer;">Markus Strobl<sup>9</sup>, and </span><span class="author_string" final_id="COIMG-146" onclick="toggle_me()" style="cursor: pointer;">Javier Santisteban<sup>10</sup></span><span class="author_string" final_id="COIMG-146" onclick="toggle_me()" style="cursor: pointer;">; <sup>1</sup>University of California, Berkeley (United States), <sup>2</sup>STFC-Rutherford Appleton Laboratory (United Kingdom), <sup>3</sup>Japan Proton Accelerator Research Complex (J-PARC) Center (Japan), <sup>4</sup>Oak Ridge National Laboratory (United States), <sup>5</sup>Forschungs-Neutronenquelle Heinz Maier-Leibnitz (Germany), <sup>6</sup>Los Alamos National Laboratory (United States), <sup>7</sup>Miami University (United States), <sup>8</sup>University of Newcastle (Australia), <sup>9</sup>Paul Scherrer Institut (PSI) (Switzerland), and <sup>10</sup>Comisión Nacional de Energía Atómica CNEA/CONICET (Argentina)</span><span class="abstract_link" final_id="COIMG-146" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="COIMG-146" id="abstract-COIMG-146" onclick="toggle_me()" style="display:none; cursor:pointer;">Recent development of various modes of energy resolved neutron imaging has dramatically enhanced the capabilities of many non-destructive testing studies, far beyond conventional radiography. The possibility of measuring the neutron transmission spectrum in each pixel of an imaging dataset enables many new experimental capabilities, among which are spatially-resolved characterization of microstructure and elemental composition within crystalline materials, in-situ diagnostics of crystal growth, temperature mapping of enclosed materials and many others. The reconstruction of these properties sometimes requires quite substantial data processing and analysis in order to recover the needed maps and distributions with sub-100 µm resolution over areas of several centimeters. These experiments are enabled by the development of two crucial components: bright pulsed neutron sources and fast neutron counting detection technology. In this paper we will present results of several experimental studies where extensive computation was involved in the reconstruction of the required maps of material parameters. Reconstruction of residual strain, temperature distribution, and elemental composition, with spatial resolution as low as 55 µm in some cases, will be presented for various engineering materials and assemblies, single-crystal scintillators, unique geological samples and others. For the session: Neutron Imaging Beyond Traditional Radiography.</p> <p> </p> <p class="presentation_time" style="text-align:left;">11:30<a name="COIMG-148"></a><span style="float: right;">COIMG-148</span> <br> <span class="presentation_title" final_id="COIMG-148" onclick="toggle_me()" style="cursor: pointer;">Neutron imaging at LANSCE: Characterizing materials for the next generation of nuclear reactor designs, </span><span class="author_string" final_id="COIMG-148" onclick="toggle_me()" style="cursor: pointer;">Alexander M. Long, </span><span class="author_string" final_id="COIMG-148" onclick="toggle_me()" style="cursor: pointer;">Sven C.
Vogel, </span><span class="author_string" final_id="COIMG-148" onclick="toggle_me()" style="cursor: pointer;">James Torres, </span><span class="author_string" final_id="COIMG-148" onclick="toggle_me()" style="cursor: pointer;">D. Travis Carver, </span><span class="author_string" final_id="COIMG-148" onclick="toggle_me()" style="cursor: pointer;">S. Scott Parker, </span><span class="author_string" final_id="COIMG-148" onclick="toggle_me()" style="cursor: pointer;">Marisa Monreal, </span><span class="author_string" final_id="COIMG-148" onclick="toggle_me()" style="cursor: pointer;">J. Matthew Jackson, </span><span class="author_string" final_id="COIMG-148" onclick="toggle_me()" style="cursor: pointer;">Holly Trellue, </span><span class="author_string" final_id="COIMG-148" onclick="toggle_me()" style="cursor: pointer;">Aditya Shivprasad, </span><span class="author_string" final_id="COIMG-148" onclick="toggle_me()" style="cursor: pointer;">Caitlin Taylor, and </span><span class="author_string" final_id="COIMG-148" onclick="toggle_me()" style="cursor: pointer;">Erik Luther</span><span class="author_string" final_id="COIMG-148" onclick="toggle_me()" style="cursor: pointer;">, Los Alamos National Laboratory (United States)</span><span class="abstract_link" final_id="COIMG-148" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="COIMG-148" id="abstract-COIMG-148" onclick="toggle_me()" style="display:none; cursor:pointer;">Neutrons are an ideal probe for characterizing nuclear fuels and moderator materials for next generation nuclear reactors, as their interactions with matter create complex attenuations that result in a unique combination of isotope-specific contrast mechanisms and penetrabilities, thus making neutrons well suited for investigating both high-Z materials (actinides in nuclear fuels) and low-Z materials (metal hydrides). Furthermore, the high material penetrability of neutron imaging allows for in-situ measurements at extreme conditions (high temperatures or activity) where bulky sample environments are required. The presented work will cover ongoing efforts at the Los Alamos Neutron Science Center (LANSCE) to develop advanced neutron imaging capabilities on Flight Path 5 (FP5) specifically for characterizing materials for advanced reactor designs. These efforts range from thermophysical property measurements of chloride-based molten salts, to hydrogen characterization in metal hydride moderator materials, to post-irradiation examination with energy resolved neutron imaging of actinides in fresh and irradiated fuels. For the session: Neutron Imaging Beyond Traditional Radiography.</p> <p> </p> <br> <br> <p class="event_time">12:30 – 2:00 PM Lunch</p> <div class="pinkcallout"> <p class="session_title">Tuesday 17 January PLENARY: Embedded Gain Maps for Adaptive Display of High Dynamic Range Images</p> <span class="chair">Session Chair: Robin Jenkin, NVIDIA Corporation (United States)<br> </span> <span class="session_time">2:00 PM – 3:00 PM</span> <br> <span class="room">Cyril Magnin I/II/III<br> </span> <span></span> <p class="session_notes">Images optimized for High Dynamic Range (HDR) displays have brighter highlights and more detailed shadows, resulting in an increased sense of realism and greater impact. However, a major issue with HDR content is the lack of consistency in appearance across different devices and viewing environments.
There are several reasons, including the varying capabilities of HDR displays and the different tone mapping methods implemented across software and platforms. Consequently, HDR content authors can neither control nor predict how their images will appear in other apps.</p> <span></span> <p class="session_notes">We present a flexible system that provides consistent and adaptive display of HDR images. Conceptually, the method combines both SDR and HDR renditions within a single image and interpolates between the two dynamically at display time. We compute a Gain Map that represents the difference between the two renditions. In the file, we store a Base rendition (either SDR or HDR), the Gain Map, and some associated metadata. At display time, we combine the Base image with a scaled version of the Gain Map, where the scale factor depends on the image metadata, the HDR capacity of the display, and the viewing environment. </p>
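<p class="session_notes">One plausible parameterization of that display-time combination (illustrative only; the talk and any associated specification define the precise file layout and weighting) works in linear light: the displayed image is the Base multiplied by the per-pixel Gain Map raised to a weight between 0 and 1, where the weight compares the display's available headroom against the headroom range recorded in the metadata.</p> <pre>
import numpy as np

# Hedged sketch of adaptive gain-map display. Names and the weighting
# rule are illustrative, not the exact published format.
def display(base_linear, gain_map, display_headroom_stops,
            capacity_min_stops=0.0, capacity_max_stops=3.0):
    # Weight 0 reproduces the Base (SDR) rendition; weight 1 applies the
    # full Gain Map and yields the HDR rendition; displays in between blend.
    w = (display_headroom_stops - capacity_min_stops) / (capacity_max_stops - capacity_min_stops)
    w = np.clip(w, 0.0, 1.0)
    return base_linear * np.power(gain_map, w)

base = np.array([[0.2, 0.8]])            # Base rendition, linear light
gain = np.array([[1.0, 6.0]])            # per-pixel HDR/SDR ratio (the Gain Map)
sdr_out = display(base, gain, 0.0)       # equals base on an SDR display
hdr_out = display(base, gain, 3.0)       # equals base * gain on a capable HDR display
</pre>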
Brady</span><span class="author_string" final_id="COIMG-150" onclick="toggle_me()" style="cursor: pointer;">, The University of Arizona (United States)</span><span class="abstract_link" final_id="COIMG-150" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="COIMG-150" id="abstract-COIMG-150" onclick="toggle_me()" style="display:none; cursor:pointer;">Phase retrieval on direct or secondary scatter enables remote object identification and imaging at resolution 10-100x finer than the single aperture diffraction limit. This talk presents theoretical analysis and experimental results exploring the limits of this technique. We describe experiments demonstrating <50 micron feature reconstruction at a range of 100 m, corresponding to sub-microradian resolution. We consider the feasibility and potential applications of nanoradian resolving systems. We discuss the limits of scatter imaging on terrestrial targets, drones, space debris and asteroids. We compare scatter imaging of laser and solar-illuminated targets and discuss resolution limits as a function of source coherence. For the session: Computational Imaging using Fourier Ptychography and Phase Retrieval.</p> <p> </p> <p class="presentation_time" style="text-align:left;">3:50<a name="COIMG-151"></a><span style="float: right;">COIMG-151</span> <br> <span class="presentation_title" final_id="COIMG-151" onclick="toggle_me()" style="cursor: pointer;">Diffractive optical networks & computational imaging without a computer, </span><span class="author_string" final_id="COIMG-151" onclick="toggle_me()" style="cursor: pointer;">Aydogan Ozcan</span><span class="author_string" final_id="COIMG-151" onclick="toggle_me()" style="cursor: pointer;">, UCLA (United States)</span><span class="abstract_link" final_id="COIMG-151" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="COIMG-151" id="abstract-COIMG-151" onclick="toggle_me()" style="display:none; cursor:pointer;">I will discuss diffractive optical networks designed by deep learning to all-optically implement various complex functions as the input light diffracts through spatially-engineered surfaces. These diffractive processors designed by deep learning have various applications, e.g., all-optical image analysis, feature detection, object classification, computational imaging and seeing through diffusers, also enabling task-specific camera designs and new optical components for spatial, spectral and temporal beam shaping and spatially-controlled wavelength division multiplexing. These deep learning-designed diffractive systems can broadly impact (1) all-optical statistical inference engines, (2) computational camera and microscope designs and (3) inverse design of optical systems that are task-specific. In this talk, I will give examples of each group, enabling transformative capabilities for various applications of interest in e.g., autonomous systems, defense/security, telecommunications as well as biomedical imaging and sensing. 
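</p>
<p>The core operation behind such diffractive processors can be pictured in a few lines: at each spatially engineered surface the field acquires a learned phase, then diffracts to the next surface. Below is a minimal sketch using angular-spectrum propagation with illustrative dimensions and random, untrained phases; it is not the speaker's code.</p>
<pre><code class="language-python">
import numpy as np

def angular_spectrum(field, wavelength, dx, z):
    """Free-space propagation of a complex field over distance z."""
    n = field.shape[0]
    fx = np.fft.fftfreq(n, d=dx)
    FX, FY = np.meshgrid(fx, fx)
    arg = 1.0 / wavelength**2 - FX**2 - FY**2
    kz = 2 * np.pi * np.sqrt(np.maximum(arg, 0.0))
    H = np.where(arg > 0, np.exp(1j * kz * z), 0)     # drop evanescent components
    return np.fft.ifft2(np.fft.fft2(field) * H)

def diffractive_layer(field, phase, wavelength, dx, z):
    """One passive 'layer': learned phase mask followed by diffraction."""
    return angular_spectrum(field * np.exp(1j * phase), wavelength, dx, z)

# Three engineered surfaces; trained phases would replace the random ones here.
rng = np.random.default_rng(0)
field = np.ones((128, 128), dtype=complex)            # plane-wave illumination
for phase in rng.uniform(0, 2 * np.pi, (3, 128, 128)):
    field = diffractive_layer(field, phase, wavelength=750e-9, dx=400e-9, z=40e-6)
intensity = np.abs(field) ** 2                        # detector measures intensity
</code></pre>
<p>In a trained system, the phase arrays would be optimized by deep learning so that the detected intensity pattern encodes the task output, with no electronic compute at inference time.</p>
<p class="abstract" final_id="COIMG-151" onclick="toggle_me()" style="display:none; cursor:pointer;">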
For the session: Computational Imaging using Fourier Ptychography and Phase Retrieval.</p> <p> </p> <p class="presentation_time" style="text-align:left;">4:10<a name="COIMG-152"></a><span style="float: right;">COIMG-152</span> <br> <span class="presentation_title" final_id="COIMG-152" onclick="toggle_me()" style="cursor: pointer;">Computational microscopy of scattering samples, </span><span class="author_string" final_id="COIMG-152" onclick="toggle_me()" style="cursor: pointer;">Shwetadwip Chowdhury</span><span class="author_string" final_id="COIMG-152" onclick="toggle_me()" style="cursor: pointer;">, University of Texas at Austin (United States)</span><span class="abstract_link" final_id="COIMG-152" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="COIMG-152" id="abstract-COIMG-152" onclick="toggle_me()" style="display:none; cursor:pointer;">Optical imaging is a major research tool in the basic sciences and provides both morphological and molecular-specific imaging capabilities. Furthermore, it is the only imaging modality that routinely enables non-ionizing imaging with subcellular spatial resolutions and high imaging speeds. In biological imaging applications, however, optical imaging is limited by tissue scattering to short imaging depths. This restricts optical imaging to the outer superficial layers of an organism, or to specific components isolated from the organism and prepared in vitro. I present recent developments in computational microscopy that enable 1) 3D phase and fluorescent super-resolution using optical scattering; and 2) 3D refractive-index imaging of heterogeneously scattering samples. I will discuss the computational frameworks that underpin these applications, which are based on large-scale nonlinear and nonconvex optimization. For the session: Computational Imaging using Fourier Ptychography and Phase Retrieval.</p> <p> </p> <p class="presentation_time" style="text-align:left;">4:30<a name="COIMG-153"></a><span style="float: right;">COIMG-153</span> <br> <span class="presentation_title" final_id="COIMG-153" onclick="toggle_me()" style="cursor: pointer;">Practical phase retrieval using double deep image priors, </span><span class="author_string" final_id="COIMG-153" onclick="toggle_me()" style="cursor: pointer;">Zhong Zhuang<sup>1</sup>, </span><span class="author_string" final_id="COIMG-153" onclick="toggle_me()" style="cursor: pointer;">David Yang<sup>2</sup>, </span><span class="author_string" final_id="COIMG-153" onclick="toggle_me()" style="cursor: pointer;">Felix Hofmann<sup>2</sup>, </span><span class="author_string" final_id="COIMG-153" onclick="toggle_me()" style="cursor: pointer;">David Barmherzig<sup>3</sup>, and </span><span class="author_string" final_id="COIMG-153" onclick="toggle_me()" style="cursor: pointer;">Ju Sun<sup>1</sup></span><span class="author_string" final_id="COIMG-153" onclick="toggle_me()" style="cursor: pointer;">; <sup>1</sup>University of Minnesota, Twin Cities (United States), <sup>2</sup>University of Oxford (United Kingdom), and <sup>3</sup>Flatiron Institute (United States)</span><span class="abstract_link" final_id="COIMG-153" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="COIMG-153" id="abstract-COIMG-153" onclick="toggle_me()" style="display:none; cursor:pointer;">Phase retrieval (PR) consists of recovering complex-valued objects from their oversampled Fourier magnitudes and takes a central place in scientific imaging. 
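</p>
<p>To fix ideas, the PR measurement model can be written in a few lines of NumPy; the 2x oversampling below is an illustrative choice, not a detail taken from the paper.</p>
<pre><code class="language-python">
import numpy as np

def pr_measurements(x):
    """Oversampled Fourier magnitudes of a complex object (2x padding, illustrative)."""
    n = x.shape[0]
    padded = np.zeros((2 * n, 2 * n), dtype=complex)
    padded[:n, :n] = x
    return np.abs(np.fft.fft2(padded))

rng = np.random.default_rng(0)
x_true = rng.standard_normal((32, 32)) + 1j * rng.standard_normal((32, 32))
b = pr_measurements(x_true)   # all Fourier phase is lost; PR must recover x from b
</code></pre>
<p class="abstract" final_id="COIMG-153" onclick="toggle_me()" style="display:none; cursor:pointer;">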
A critical issue around PR is the typical nonconvexity in natural formulations and the associated bad local minimizers. The issue is exacerbated when the support of the object is not precisely known and hence must be overspecified in practice. Practical methods for PR hence involve convolved algorithms, e.g., multiple cycles of hybrid input-output (HIO) + error reduction (ER), to avoid the bad local minimizers and attain reasonable speed, and heuristics to refine the support of the object, e.g., the famous shrinkwrap trick. Overall, the convolved algorithms and the support-refinement heuristics induce multiple algorithm hyperparameters, to which the recovery quality is often sensitive. In this work, we propose a novel PR method by parameterizing the object as the output of a learnable neural network, i.e., deep image prior (DIP). For complex-valued objects in PR, we can flexibly parametrize either the magnitude and phase or the real and imaginary parts separately with two DIPs. We show that this simple idea, free from multi-hyperparameter tuning and support-refinement heuristics, can obtain performance superior to gold-standard PR methods. For the session: Computational Imaging using Fourier Ptychography and Phase Retrieval.</p> <p> </p> <p class="presentation_time" style="text-align:left;">4:50<a name="COIMG-154"></a><span style="float: right;">COIMG-154</span> <br> <span class="presentation_title" final_id="COIMG-154" onclick="toggle_me()" style="cursor: pointer;">Synthetic wavelength imaging - Exploiting spectral diversity for absolute phase measurements through scattering scenes, </span><span class="author_string" final_id="COIMG-154" onclick="toggle_me()" style="cursor: pointer;">Florian Willomitzer</span><span class="author_string" final_id="COIMG-154" onclick="toggle_me()" style="cursor: pointer;">, University of Arizona (United States)</span><span class="abstract_link" final_id="COIMG-154" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="COIMG-154" id="abstract-COIMG-154" onclick="toggle_me()" style="display:none; cursor:pointer;">Scattering severely limits the visual acuity of an imaging system. This talk discusses how diversity in illumination wavelength can be utilized to circumvent the problem of phase randomization in scattered light fields. Amongst other applications, the introduced method allows for holographic measurements of hidden objects around corners and through scattering media, or for interferometric measurements of macroscopic objects with rough surfaces. This is possible as the technique interrogates the scene at two closely spaced optical wavelengths and computationally assembles a complex “synthetic field” at a “synthetic wave,” which is used for further processing. As the synthetic wavelength is the beat wavelength of the two optical wavelengths, it can be chosen orders of magnitude larger, and the computationally assembled synthetic field becomes immune to the deleterious effect of speckle. During the talk, different flavors of the technique will be introduced, using the examples of our latest experimental results. 
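</p>
<p>The beat-wavelength arithmetic is compact enough to sketch; the wavelengths and range below are illustrative stand-ins, not the speaker's experimental values.</p>
<pre><code class="language-python">
import numpy as np

lam1, lam2 = 854.0e-9, 855.0e-9                # closely spaced wavelengths (illustrative)
lam_synth = lam1 * lam2 / abs(lam1 - lam2)     # beat ("synthetic") wavelength, ~0.73 mm

# Fields measured at the two wavelengths for a rough surface at one-way range d;
# the random speckle phase is common to both and cancels in the synthetic field.
rng = np.random.default_rng(1)
d = 1.5e-4                                     # meters (illustrative, within unambiguous range)
speckle = rng.uniform(0, 2 * np.pi, 256)
E1 = np.exp(1j * (4 * np.pi * d / lam1 + speckle))
E2 = np.exp(1j * (4 * np.pi * d / lam2 + speckle))

E_synth = E1 * np.conj(E2)                     # speckle phase cancels exactly here
d_est = np.angle(E_synth.mean()) * lam_synth / (4 * np.pi)   # range, modulo lam_synth / 2
</code></pre>
<p>Because the synthetic phase varies on the millimeter scale rather than the optical scale, microscopic surface roughness no longer randomizes it, which is the speckle immunity the abstract refers to.</p>
<p class="abstract" final_id="COIMG-154" onclick="toggle_me()" style="display:none; cursor:pointer;">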
For the session: Computational Imaging using Fourier Ptychography and Phase Retrieval.</p> <p> </p> <p class="presentation_time" style="text-align:left;">5:10<a name="COIMG-155"></a><span style="float: right;">COIMG-155</span> <br> <span class="presentation_title" final_id="COIMG-155" onclick="toggle_me()" style="cursor: pointer;">Commissioning the James Webb Space Telescope, </span><span class="author_string" final_id="COIMG-155" onclick="toggle_me()" style="cursor: pointer;">Joseph M. Howard</span><span class="author_string" final_id="COIMG-155" onclick="toggle_me()" style="cursor: pointer;">, NASA Goddard Space Flight Center (United States)</span><span class="abstract_link" final_id="COIMG-155" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="COIMG-155" id="abstract-COIMG-155" onclick="toggle_me()" style="display:none; cursor:pointer;">Astronomy is arguably in a golden age, where current and future NASA space telescopes are expected to contribute to this rapid growth in understanding of our universe. The most recent addition to our space-based telescopes dedicated to astronomy and astrophysics is the James Webb Space Telescope (JWST), which launched on 25 December 2021. This talk will discuss the first six months in space for JWST, which were spent commissioning the observatory with many deployments, alignments, and system and instrumentation checks. These engineering activities help verify the proper working of the telescope prior to commencing full science operations. For the session: Computational Imaging using Fourier Ptychography and Phase Retrieval.</p> <p> </p> <br> <br> <p class="event_time">5:30 – 7:00 PM EI 2023 Symposium Demonstration Session (in the Cyril Magnin Foyer)</p> <p class="date">Wednesday 18 January 2023</p> <div class="callout"> <p class="session_title">KEYNOTE: Processing at the Edge (W1)<img alt="" class="flag_image" src="http://www.imaging.org/images/IST_Images/Conferences/EI/Joint-Session.png" style="vertical-align: middle; margin-left:1em;"></p> <span class="chair">Session Chairs: Stanley Chan, Purdue University (United States) and Boyd Fowler, OmniVision Technologies (United States)<br> </span><span class="session_time">8:45 – 10:20 AM</span> <br> <span class="room">Market Street </span> <br> <span></span> <p class="session_notes">This session is jointly sponsored by: Computational Imaging XXI, Imaging Sensors and Systems 2023, and the International Image Sensor Society (IISS). </p> <br> <p class="presentation_time" style="text-align:left;">8:45<br> <span class="presentation_title">COIMG/ISS Joint Sessions Welcome</span> </p> <p class="presentation_time" style="text-align:left;">8:50<a name="COIMG-177"></a><a name="COIMG-177"></a><span style="float: right;">COIMG-177</span> <br> <span class="presentation_title" final_id="COIMG-177" onclick="toggle_me()" style="cursor: pointer;">KEYNOTE: Deep optics: Learning cameras and optical computing systems, </span><span class="author_string" final_id="COIMG-177" onclick="toggle_me()" style="cursor: pointer;">Gordon Wetzstein</span><span class="author_string" final_id="COIMG-177" onclick="toggle_me()" style="cursor: pointer;">, Stanford University (United States)</span><span class="abstract_link" final_id="COIMG-177" onclick="toggle_me()"> [view abstract] </span></p> <p class="session_notes"> </p> <p class="session_notes">Gordon Wetzstein is an Associate Professor of Electrical Engineering and, by courtesy, of Computer Science at Stanford University. 
He is the leader of the Stanford Computational Imaging Lab and a faculty co-director of the Stanford Center for Image Systems Engineering. At the intersection of computer graphics and vision, artificial intelligence, computational optics, and applied vision science, Prof. Wetzstein's research has a wide range of applications in next-generation imaging, wearable computing, and neural rendering systems. Prof. Wetzstein is a Fellow of Optica and the recipient of numerous awards, including an NSF CAREER Award, an Alfred P. Sloan Fellowship, an ACM SIGGRAPH Significant New Researcher Award, a Presidential Early Career Award for Scientists and Engineers (PECASE), an SPIE Early Career Achievement Award, an Electronic Imaging Scientist of the Year Award, an Alain Fournier Ph.D. Dissertation Award as well as many Best Paper and Demo Awards.</p> <p class="abstract" final_id="COIMG-177" id="abstract-COIMG-177" onclick="toggle_me()" style="display:none; cursor:pointer;">Neural networks excel at a wide variety of imaging and perception tasks, but their high performance also comes at a high computational cost and their success on edge devices is often limited. In this talk, we explore hybrid optical-electronic strategies to computational imaging that outsource parts of the algorithm into the optical domain or into emerging in-pixel processing capabilities. Using such a co-design of optics, electronics, and image processing, we can learn application-domain-specific cameras using modern artificial intelligence techniques or compute parts of a convolutional neural network in optics with little to no computational overhead. For the session: Processing at the Edge (joint with ISS).</p> <p> </p> <p class="presentation_time" style="text-align:left;">9:40<a name="COIMG-178"></a><a name="COIMG-178"></a><span style="float: right;">COIMG-178</span> <br> <span class="presentation_title" final_id="COIMG-178" onclick="toggle_me()" style="cursor: pointer;">Computational photography on a smartphone, </span><span class="author_string" final_id="COIMG-178" onclick="toggle_me()" style="cursor: pointer;">Michael Polley</span><span class="author_string" final_id="COIMG-178" onclick="toggle_me()" style="cursor: pointer;">, Samsung Research America (United States)</span><span class="abstract_link" final_id="COIMG-178" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="COIMG-178" id="abstract-COIMG-178" onclick="toggle_me()" style="display:none; cursor:pointer;">Many of the recent advances in smartphone camera quality and features can be attributed to computational photography. However, the increased computational requirements must be balanced with cost, power, and other practical concerns. In this talk, we look at the embedded signal processing currently applied, including new AI-based solutions in the signal chain. By taking advantage of increasing computational performances of traditional processor cores, and additionally tapping into the exponentially increasing capabilities of the new compute engines such as neural processing units, we are able to deliver on-device computational imaging. 
For the session: Processing at the Edge (joint with ISS).</p> <p> </p> <p class="presentation_time" style="text-align:left;">10:00<a name="COIMG-179"></a><a name="COIMG-179"></a><span style="float: right;">COIMG-179</span> <br> <span class="presentation_title" final_id="COIMG-179" onclick="toggle_me()" style="cursor: pointer;">Analog in-memory computing with multilevel RRAM for edge electronic imaging application, </span><span class="author_string" final_id="COIMG-179" onclick="toggle_me()" style="cursor: pointer;">Glenn Ge</span><span class="author_string" final_id="COIMG-179" onclick="toggle_me()" style="cursor: pointer;">, Teramem Inc. (United States)</span><span class="abstract_link" final_id="COIMG-179" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="COIMG-179" id="abstract-COIMG-179" onclick="toggle_me()" style="display:none; cursor:pointer;">Conventional digital processors based on the von Neumann architecture have an intrinsic bottleneck in data transfer between processing and memory units. This constraint increasingly limits performance as data sets continue to grow exponentially for the various applications, especially for the Electronic Imaging Applications at the edge, for instance, the AR/VR wearable and automotive applications. TetraMem addresses this issue by delivering state-of-the-art in-memory computing using our proprietary non-volatile computing devices. This talk will discuss how TetraMem’s solution brings several orders of magnitude improvement in computing throughput and energy efficiency, ideal for those AI fusion sensing applications at the edge. For the session: Processing at the Edge (joint with ISS).</p> <p> </p> </div> <br> <p class="event_time">10:00 AM – 3:30 PM Industry Exhibition - Wednesday (in the Cyril Magnin Foyer)</p> <p class="event_time">10:20 – 10:50 AM Coffee Break</p> <br> <br> <p class="session_title">Processing at the Edge (W2.1)<img alt="" class="flag_image" src="http://www.imaging.org/images/IST_Images/Conferences/EI/Joint-Session.png" style="vertical-align: middle; margin-left:1em;"></p> <span class="chair_label">Session Chairs: </span> <span class="chair">Stanley Chan, Purdue University (United States) and Boyd Fowler, OmniVision Technologies (United States)<br> </span> <span class="session_time">10:50 – 11:50 AM</span> <br> <span class="room">Market Street </span> <br> <span></span> <p class="session_notes">This session is jointly sponsored by: Computational Imaging XXI, Imaging Sensors and Systems 2023, and the International Image Sensor Society (IISS).</p> <br> <p class="presentation_time" style="text-align:left;">10:50<a name="COIMG-180"></a><a name="COIMG-180"></a><span style="float: right;">COIMG-180</span> <br> <span class="presentation_title" final_id="COIMG-180" onclick="toggle_me()" style="cursor: pointer;">Processing of real time, bursty and high compute iToF data on the edge (Invited), </span><span class="author_string" final_id="COIMG-180" onclick="toggle_me()" style="cursor: pointer;">Cyrus Bamji</span><span class="author_string" final_id="COIMG-180" onclick="toggle_me()" style="cursor: pointer;">, Microsoft Corporation (United States)</span><span class="abstract_link" final_id="COIMG-180" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="COIMG-180" id="abstract-COIMG-180" onclick="toggle_me()" style="display:none; cursor:pointer;">In indirect time of flight (iToF), a depth frame is computed from multiple image captures (often 6-9 captures) which are composed 
together and processed using nonlinear filters. iToF sensor output bandwidth is high, and special-purpose DSP hardware inside the camera significantly reduces the power, cost, and data movement involved. Usually only a small percentage of depth frames need application-specific processing and highest-quality depth data, both of which are difficult to compute within the limited hardware resources of the camera. Given the sporadic nature of these compute requirements, hardware utilization is improved by offloading this bursty compute to outside the camera. Many applications in the industrial and commercial space have real-time requirements and may even use multiple cameras that need to be synchronized. These real-time requirements, coupled with the high bandwidth from the sensor, make offloading the compute purely into the cloud difficult. Thus, in many cases the compute edge can provide a Goldilocks zone for this bursty, high-bandwidth, real-time processing requirement. For the session: Processing at the Edge (joint with ISS).</p> <p> </p> <p class="presentation_time" style="text-align:left;">11:10<a name="COIMG-181"></a><span style="float: right;">COIMG-181</span> <br> <span class="presentation_title" final_id="COIMG-181" onclick="toggle_me()" style="cursor: pointer;">A distributed on-sensor compute system in AR/VR devices and neural architecture search (NAS) framework for optimal workload distribution (Invited), </span><span class="author_string" final_id="COIMG-181" onclick="toggle_me()" style="cursor: pointer;">Chiao Liu<sup>1</sup>, </span><span class="author_string" final_id="COIMG-181" onclick="toggle_me()" style="cursor: pointer;">Xin Dong<sup>2</sup>, </span><span class="author_string" final_id="COIMG-181" onclick="toggle_me()" style="cursor: pointer;">Ziyun Li<sup>1</sup>, </span><span class="author_string" final_id="COIMG-181" onclick="toggle_me()" style="cursor: pointer;">Barbara De Salvo<sup>3</sup>, and </span><span class="author_string" final_id="COIMG-181" onclick="toggle_me()" style="cursor: pointer;">H. T. Kung<sup>2</sup></span><span class="author_string" final_id="COIMG-181" onclick="toggle_me()" style="cursor: pointer;">; <sup>1</sup>Reality Labs, <sup>2</sup>Harvard University, and <sup>3</sup>Meta (United States)</span><span class="abstract_link" final_id="COIMG-181" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="COIMG-181" id="abstract-COIMG-181" onclick="toggle_me()" style="display:none; cursor:pointer;">Augmented Reality (AR) will be the next great wave of human-oriented computing, dominating our relationship with the digital world for the next 50 years. The combined requirements of lowest power, best performance, and minimal form factor make AR sensors the new frontier. Previously we presented a digital pixel sensor (DPS) that could be the optimal sensor architecture for AR applications. We further presented a distributed on-sensor compute architecture, coupled with new 3-layer sensor stacking technologies, to enable the system to distribute the computation between the sensors and the main SoC in an AR system. In this talk, we study a deep neural network (DNN) as a workload example and the network's optimal splitting layer location to meet system performance requirements such as inference accuracy and latency under the given hardware resource constraint. 
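</p>
<p>The split-layer trade-off can be illustrated with a toy cost model; all layer names, timings, and link costs below are hypothetical, and this is not the SplitNets implementation.</p>
<pre><code class="language-python">
# Toy split-point search (hypothetical numbers; not the SplitNets implementation).
# activation_kb: data crossing the sensor-to-SoC link if we split AFTER that layer.
layers = [
    ("stem",   {"sensor_ms": 0.8, "soc_ms": 0.2, "activation_kb": 392}),
    ("stage1", {"sensor_ms": 1.6, "soc_ms": 0.4, "activation_kb": 196}),
    ("stage2", {"sensor_ms": 2.9, "soc_ms": 0.7, "activation_kb": 98}),
    ("stage3", {"sensor_ms": 5.1, "soc_ms": 1.3, "activation_kb": 49}),
]
LINK_MS_PER_KB = 0.01    # hypothetical cost of moving activations off the sensor
BUDGET_MS = 7.0          # end-to-end latency requirement

def end_to_end_ms(split):
    """Latency if layers [0..split] run on-sensor and the rest on the SoC."""
    sensor = sum(cfg["sensor_ms"] for _, cfg in layers[: split + 1])
    soc = sum(cfg["soc_ms"] for _, cfg in layers[split + 1 :])
    link = layers[split][1]["activation_kb"] * LINK_MS_PER_KB
    return sensor + link + soc

feasible = [(end_to_end_ms(i), name) for i, (name, _) in enumerate(layers)
            if end_to_end_ms(i) <= BUDGET_MS]
print(min(feasible) if feasible else "no feasible split")   # -> (6.36, 'stage1')
</code></pre>
<p>A NAS framework like the one described additionally redesigns the network around the split, rather than only choosing where to cut a fixed model.</p>
<p class="abstract" final_id="COIMG-181" onclick="toggle_me()" style="display:none; cursor:pointer;">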
We designed a split-aware neural architecture search (NAS) framework, SplitNets, to conduct model design, split, and communication reduction simultaneously. We validated SplitNets on ImageNet and showed that the SplitNets framework achieves state-of-the-art (SOTA) performance and system latency compared with existing approaches. For the session: Processing at the Edge (joint with ISS).</p> <p> </p> <p class="presentation_time" style="text-align:left;">11:30<a name="ISS-182"></a><span style="float: right;">ISS-182</span> <br> <span class="presentation_title" final_id="ISS-182" onclick="toggle_me()" style="cursor: pointer;">A 2.2um three-wafer stacked back side illuminated voltage domain global shutter CMOS image sensor, </span><span class="author_string" final_id="ISS-182" onclick="toggle_me()" style="cursor: pointer;">Shimpei Fukuoka</span><span class="author_string" final_id="ISS-182" onclick="toggle_me()" style="cursor: pointer;">, OmniVision (Japan)</span><span class="abstract_link" final_id="ISS-182" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="ISS-182" id="abstract-ISS-182" onclick="toggle_me()" style="display:none; cursor:pointer;">Due to the emergence of machine vision, augmented reality (AR), virtual reality (VR), and automotive connectivity in recent years, the necessity for chip miniaturization has grown. These emerging, next-generation applications, which are centered on user experience and comfort, require their constituent chips, devices, and parts to be smaller, lighter, and more accessible. AR/VR applications especially demand smaller components due to their primary application towards wearable technology, in which the user experience would be negatively impacted by large features and bulk. Therefore, chips and devices intended for next-generation consumer applications must be small and modular, to support module miniaturization and promote user comfort. To enable the chip miniaturization required for technological advancement and innovation, we developed a 2.2μm pixel pitch Back Side Illuminated (BSI) Voltage Domain Global Shutter (VDGS) image sensor with three-wafer stacked technology. Each wafer is connected by Stacked Pixel Level Connection (SPLC), and the middle and logic wafers are connected using a Back side Through Silicon Via (BTSV). The separation of the sensing, charge storage, and logic functions onto different wafers allows process optimization in each wafer, improving overall chip performance. The peripheral circuit region is reduced by 75% compared to the previous product without degrading image sensor performance. 
For the session: Processing at the Edge (joint with COIMG).</p> <p> </p> <br> <br> <p class="session_title">HDR Imaging / Reflection Removal (W2.2)</p> <span class="chair_label">Session Chair: </span> <span class="chair">Gregery Buzzard, Purdue University (United States)<br> </span> <span class="session_time">11:50 AM – 12:30 PM</span> <br> <span class="room">Market Street<br> </span> <p class="presentation_time" style="text-align:left;">11:50<a name="COIMG-156"></a><span style="float: right;">COIMG-156</span> <br> <span class="presentation_title" final_id="COIMG-156" onclick="toggle_me()" style="cursor: pointer;">A lightweight exposure bracketing strategy for HDR imaging without access to camera raw, </span><span class="author_string" final_id="COIMG-156" onclick="toggle_me()" style="cursor: pointer;">Jieyu Li<sup>1</sup>, </span><span class="author_string" final_id="COIMG-156" onclick="toggle_me()" style="cursor: pointer;">Ruiwen Zhen<sup>2</sup>, and </span><span class="author_string" final_id="COIMG-156" onclick="toggle_me()" style="cursor: pointer;">Robert L. Stevenson<sup>1</sup></span><span class="author_string" final_id="COIMG-156" onclick="toggle_me()" style="cursor: pointer;">; <sup>1</sup>University of Notre Dame and <sup>2</sup>SenseBrain Technology (United States)</span><span class="abstract_link" final_id="COIMG-156" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="COIMG-156" id="abstract-COIMG-156" onclick="toggle_me()" style="display:none; cursor:pointer;">A lightweight learning-based exposure bracketing strategy is proposed in this paper for high dynamic range (HDR) imaging without access to camera RAW. Some low-cost, power-efficient cameras, such as webcams, video surveillance cameras, sport cameras, mid-tier cellphone cameras, and navigation cameras on robots, can only provide access to 8-bit low dynamic range (LDR) images. Exposure fusion is a classical approach to capture HDR scenes by fusing images taken with different exposures into an 8-bit tone-mapped HDR image. A key question is what the optimal set of exposure settings is to cover the scene dynamic range and achieve a desirable tone. The proposed lightweight neural network predicts these exposure settings for a 3-shot exposure bracketing, given the input irradiance information from 1) the histograms of an auto-exposure LDR preview image, and 2) the maximum and minimum levels of the scene irradiance. By avoiding processing of the preview image streams, and the circuitous route of first estimating the scene HDR irradiance and then tone-mapping to 8-bit images, the proposed method gives a more practical HDR enhancement for real-time and on-device applications. Experiments on a number of challenging images reveal the advantages of our method in comparison with other state-of-the-art methods, both qualitatively and quantitatively.</p> <p> </p> <p class="presentation_time" style="text-align:left;">12:10<a name="COIMG-157"></a><span style="float: right;">COIMG-157</span> <br> <span class="presentation_title" final_id="COIMG-157" onclick="toggle_me()" style="cursor: pointer;">Sparse x-ray phase contrast dark field tomography, </span><span class="author_string" final_id="COIMG-157" onclick="toggle_me()" style="cursor: pointer;">Johnathan Mulcahy-Stanislawczyk and </span><span class="author_string" final_id="COIMG-157" onclick="toggle_me()" style="cursor: pointer;">Amber L. 
Dagel</span><span class="author_string" final_id="COIMG-157" onclick="toggle_me()" style="cursor: pointer;">, Sandia National Laboratories (United States)</span><span class="abstract_link" final_id="COIMG-157" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="COIMG-157" id="abstract-COIMG-157" onclick="toggle_me()" style="display:none; cursor:pointer;">X-Ray Phase Contrast Imaging (XPCI) augments absorption radiography with additional information related to the refractive and scattering properties of a sample. Grating-based XPCI allows broadband laboratory x-ray sources to be used, increasing the technique’s accessibility. However, grating-based techniques require repeatedly moving a grating and capturing an image at each location. Additionally, the gratings themselves are absorptive, reducing x-ray flux. As a result, data acquisition times and radiation dosages present a hurdle to practical application of XPCI tomography. We present a plug-and-play (PnP) reconstruction method for XPCI dark field tomographic reconstruction with sparse views. Dark field XPCI radiographs contain information about a sample’s microstructure and scatter. The dark field reveals subpixel sample properties, including crystalline structure, graininess, and material interfaces. This makes dark field images differently distributed from traditional absorption radiographs and natural imagery. PnP methods give greater control over reconstruction regularization compared to traditional iterative reconstruction techniques, which is especially useful given the dark field’s unique distribution. PnP allows us to collect dark field tomographic datasets with fewer projections, increasing XPCI’s practicality by reducing the amount of data needed for 3D reconstruction.</p> <p> </p> <br> <br> <p class="event_time">12:30 – 2:00 PM Lunch</p> <div class="pinkcallout"> <p class="session_title">Wednesday 18 January PLENARY: Bringing Vision Science to Electronic Imaging: The Pyramid of Visibility</p> <span class="chair">Session Chair: Andreas Savakis, Rochester Institute of Technology (United States)<br> </span> <span class="session_time">2:00 PM – 3:00 PM</span> <br> <span class="room">Cyril Magnin I/II/III<br> </span> <span></span> <p class="session_notes">Electronic imaging depends fundamentally on the capabilities and limitations of human vision. The challenge for the vision scientist is to describe these limitations to the engineer in a comprehensive, computable, and elegant formulation. Primary among these limitations are visibility of variations in light intensity over space and time, of variations in color over space and time, and of all of these patterns with position in the visual field. Lastly, we must describe how all these sensitivities vary with adapting light level. We have recently developed a structural description of human visual sensitivity that we call the Pyramid of Visibility, that accomplishes this synthesis. This talk shows how this structure accommodates all the dimensions described above, and how it can be used to solve a wide variety of problems in display engineering.</p> <br> <span></span> <p class="session_notes"> </p> <span class="author_string"><strong>Andrew B. Watson, </strong>chief vision scientist, Apple Inc. (United States)<span class="author_string"></span></span> <p> </p> <span></span> <p class="session_notes">Andrew Watson is Chief Vision Scientist at Apple, where he leads the application of vision science to technologies, applications, and displays. 
His research focuses on computational models of early vision. He is the author of more than 100 scientific papers and 8 patents. He has 21,180 citations and an h-index of 63. Watson founded the Journal of Vision, and served as editor-in-chief 2001-2013 and 2018-2022. Watson has received numerous awards including the Presidential Rank Award from the President of the United States.</p> </div> <br> <p class="event_time">3:00 – 3:30 PM Coffee Break</p> <p class="session_title">Imaging with Coded Apertures (W3)</p> <span class="chair_label">Session Chair: </span> <span class="chair">Xiaogang Yang, Brookhaven National Laboratory (United States)<br> </span> <span class="session_time">3:30 – 5:30 PM</span> <br> <span class="room">Market Street<br> </span> <p class="presentation_time" style="text-align:left;">3:30<a name="COIMG-162"></a><span style="float: right;">COIMG-162</span> <br> <span class="presentation_title" final_id="COIMG-162" onclick="toggle_me()" style="cursor: pointer;">X-ray phase contrast imaging using apertures: From proof-of-concept experiments at synchrotrons to pre-commercial prototypes with conventional sources, </span><span class="author_string" final_id="COIMG-162" onclick="toggle_me()" style="cursor: pointer;">Alessandro Olivo</span><span class="author_string" final_id="COIMG-162" onclick="toggle_me()" style="cursor: pointer;">, University College London (United Kingdom)</span><span class="abstract_link" final_id="COIMG-162" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="COIMG-162" id="abstract-COIMG-162" onclick="toggle_me()" style="display:none; cursor:pointer;">X-ray phase contrast imaging using apertures: from proof-of-concept experiments at synchrotrons to pre-commercial prototypes with conventional sources. For the session: Imaging with Coded Apertures.</p> <p> </p> <p class="presentation_time" style="text-align:left;">3:50<a name="COIMG-163"></a><span style="float: right;">COIMG-163</span> <br> <span class="presentation_title" final_id="COIMG-163" onclick="toggle_me()" style="cursor: pointer;">Deep regularization functions for coded-aperture design in computational imaging, </span><span class="author_string" final_id="COIMG-163" onclick="toggle_me()" style="cursor: pointer;">Roman Jacome, </span><span class="author_string" final_id="COIMG-163" onclick="toggle_me()" style="cursor: pointer;">Emmanuel Martinez, </span><span class="author_string" final_id="COIMG-163" onclick="toggle_me()" style="cursor: pointer;">Jorge Bacca, and </span><span class="author_string" final_id="COIMG-163" onclick="toggle_me()" style="cursor: pointer;">Henry Arguello Fuentes</span><span class="author_string" final_id="COIMG-163" onclick="toggle_me()" style="cursor: pointer;">, Universidad Industrial de Santander (Colombia)</span><span class="abstract_link" final_id="COIMG-163" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="COIMG-163" id="abstract-COIMG-163" onclick="toggle_me()" style="display:none; cursor:pointer;">Deep regularization functions for coded-aperture design in computational imaging. 
For the session: Imaging with Coded Apertures.</p> <p> </p> <p class="presentation_time" style="text-align:left;">4:10<a name="COIMG-160"></a><span style="float: right;">COIMG-160</span> <br> <span class="presentation_title" final_id="COIMG-160" onclick="toggle_me()" style="cursor: pointer;">CodEx: A modular framework for joint temporal de-blurring and tomographic reconstruction, </span><span class="author_string" final_id="COIMG-160" onclick="toggle_me()" style="cursor: pointer;">Soumendu Majee<sup>1</sup>, </span><span class="author_string" final_id="COIMG-160" onclick="toggle_me()" style="cursor: pointer;">Selin Aslan<sup>2</sup>, </span><span class="author_string" final_id="COIMG-160" onclick="toggle_me()" style="cursor: pointer;">Doga Gursoy<sup>2</sup>, and </span><span class="author_string" final_id="COIMG-160" onclick="toggle_me()" style="cursor: pointer;">Charles A. Bouman<sup>3</sup></span><span class="author_string" final_id="COIMG-160" onclick="toggle_me()" style="cursor: pointer;">; <sup>1</sup>Samsung Research America, <sup>2</sup>Argonne National Laboratory, and <sup>3</sup>Purdue University (United States)</span><span class="abstract_link" final_id="COIMG-160" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="COIMG-160" id="abstract-COIMG-160" onclick="toggle_me()" style="display:none; cursor:pointer;">In many computed tomography (CT) imaging applications, it is important to rapidly collect data from an object that is moving or changing with time. Tomographic acquisition is generally assumed to be step-and-shoot, where the object is rotated to each desired angle, and a view is taken. However, step-and-shoot acquisition is slow and can waste photons, so in practice fly-scanning is done where the object is continuously rotated while collecting data. However, this can result in motion-blurred views and consequently reconstructions with severe motion artifacts. In this paper, we introduce CodEx, a modular framework for joint de-blurring and tomographic reconstruction that can effectively invert the motion blur introduced in sparse view fly-scanning. The method is a synergistic combination of a novel acquisition method with a novel non-convex Bayesian reconstruction algorithm. CodEx works by encoding the acquisition with a known binary code that the reconstruction algorithm then inverts. Using a well chosen binary code to encode the measurements can improve the accuracy of the inversion process. The CodEx reconstruction method uses the alternating direction method of multipliers (ADMM) to split the inverse problem into iterative deblurring and reconstruction sub-problems, making reconstruction practical to implement. We present reconstruction results on both simulated and binned experimental data to demonstrate the effectiveness of our method. For the session: Imaging with Coded Apertures.</p> <p> </p> <p class="presentation_time" style="text-align:left;">4:30<a name="COIMG-158"></a><span style="float: right;">COIMG-158</span> <br> <span class="presentation_title" final_id="COIMG-158" onclick="toggle_me()" style="cursor: pointer;">First use of coded-apertures for depth-resolved Laue diffraction, </span><span class="author_string" final_id="COIMG-158" onclick="toggle_me()" style="cursor: pointer;">Doga Gursoy, </span><span class="author_string" final_id="COIMG-158" onclick="toggle_me()" style="cursor: pointer;">Dina Sheyfer, </span><span class="author_string" final_id="COIMG-158" onclick="toggle_me()" style="cursor: pointer;">Michael J. 
Wojcik, </span><span class="author_string" final_id="COIMG-158" onclick="toggle_me()" style="cursor: pointer;">Wenjun Liu, and </span><span class="author_string" final_id="COIMG-158" onclick="toggle_me()" style="cursor: pointer;">Jon Tischler</span><span class="author_string" final_id="COIMG-158" onclick="toggle_me()" style="cursor: pointer;">, Argonne National Laboratory (United States)</span><span class="abstract_link" final_id="COIMG-158" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="COIMG-158" id="abstract-COIMG-158" onclick="toggle_me()" style="display:none; cursor:pointer;">First use of coded-apertures for depth-resolved Laue diffraction. For the session: Imaging with Coded Apertures.</p> <p> </p> <p class="presentation_time" style="text-align:left;">4:50<a name="COIMG-159"></a><span style="float: right;">COIMG-159</span> <br> <span class="presentation_title" final_id="COIMG-159" onclick="toggle_me()" style="cursor: pointer;">Deep learning image reconstruction for Laue microdiffraction with coded-apertures, </span><span class="author_string" final_id="COIMG-159" onclick="toggle_me()" style="cursor: pointer;">Xiaogang Yang<sup>1</sup>, </span><span class="author_string" final_id="COIMG-159" onclick="toggle_me()" style="cursor: pointer;">Esther Tsai<sup>1</sup>, and </span><span class="author_string" final_id="COIMG-159" onclick="toggle_me()" style="cursor: pointer;">Doga Gursoy<sup>2</sup></span><span class="author_string" final_id="COIMG-159" onclick="toggle_me()" style="cursor: pointer;">; <sup>1</sup>Brookhaven National Laboratory and <sup>2</sup>Argonne National Laboratory (United States)</span><span class="abstract_link" final_id="COIMG-159" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="COIMG-159" id="abstract-COIMG-159" onclick="toggle_me()" style="display:none; cursor:pointer;">Scanning coded-apertures across the diffracted x-ray beams enables depth-resolved measurement of crystal structural properties such as orientation and strain. It resolves full diffraction information at sub-micrometer volume elements in bulk materials in a rapid process. The reconstruction from the scanned signal to structural information is a typical inverse problem for linear equations. Specific sampling and noise conditions are required for a decent quality of reconstruction with conventional methods. However, these conditions may vary in real measurements. We will present our study on deep learning reconstructions for coded-aperture measurements. A model-based image reconstruction with Deep Neural Networks will be applied for the coded-aperture modality. We will demonstrate the robust reconstruction process under undersampled and noisy data conditions. For the session: Imaging with Coded Apertures.</p> <p> </p> <p class="presentation_time" style="text-align:left;">5:10<a name="COIMG-161"></a><span style="float: right;">COIMG-161</span> <br> <span class="presentation_title" final_id="COIMG-161" onclick="toggle_me()" style="cursor: pointer;">Coded aperture fabrication for x-ray experiments at the Advanced Photon Source, </span><span class="author_string" final_id="COIMG-161" onclick="toggle_me()" style="cursor: pointer;">Michael J. 
Wojcik, </span><span class="author_string" final_id="COIMG-161" onclick="toggle_me()" style="cursor: pointer;">Dina Sheyfer, </span><span class="author_string" final_id="COIMG-161" onclick="toggle_me()" style="cursor: pointer;">Doga Gursoy, </span><span class="author_string" final_id="COIMG-161" onclick="toggle_me()" style="cursor: pointer;">Jon Tischler, </span><span class="author_string" final_id="COIMG-161" onclick="toggle_me()" style="cursor: pointer;">Ralu Divan, and </span><span class="author_string" final_id="COIMG-161" onclick="toggle_me()" style="cursor: pointer;">David Czaplewski</span><span class="author_string" final_id="COIMG-161" onclick="toggle_me()" style="cursor: pointer;">, Argonne National Laboratory Advanced Photon Source (United States)</span><span class="abstract_link" final_id="COIMG-161" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="COIMG-161" id="abstract-COIMG-161" onclick="toggle_me()" style="display:none; cursor:pointer;">The Advanced Photon Source (APS) is a third generation X-ray synchrotron with more than 50 operating beamlines using X-rays to study materials and systems. Beamlines at APS use coded apertures for many X-ray techniques and the APS Optics group has a Nanofabricated Optics section whose purpose is mostly to develop and fabricate these optics. The coded aperture optics include gratings, zone plates, random arrays, and coded bit arrays. Through utilizing the cleanroom at the Center for Nanoscale Materials a variety of recipes have been created for sub-20 nm smallest zone width zone plates to millimeter area 2-D gratings with micron pitch. The coded apertures have been used in experiments to measure coherence of the APS source, improve depth resolution measurements for Laue diffraction measurements, and many others. This paper will also discuss fabrication limitations and future possibilities for coded apertures. For the session: Imaging with Coded Apertures.</p> <p> </p> <br> <br> <p class="session_title">Computational Imaging XXI Interactive (Poster) Paper Session</p> <span class="session_time">5:30 – 7:00 PM</span> <br> <span class="room">Cyril Magnin Foyer </span> <br> <span></span> <p class="session_notes">The following work will be presented at the EI 2023 Symposium Interactive (Poster) Paper Session.</p> <br> <p class="presentation_time" style="text-align:left;"> <a name="COIMG-164"></a><span style="float: right;">COIMG-164</span> <br> <span class="presentation_title" final_id="COIMG-164" onclick="toggle_me()" style="cursor: pointer;">Spectral recovery in a photograph with a hyperspectral color chart, </span><span class="author_string" final_id="COIMG-164" onclick="toggle_me()" style="cursor: pointer;">Semin Kwon, </span><span class="author_string" final_id="COIMG-164" onclick="toggle_me()" style="cursor: pointer;">Sang Mok Park, </span><span class="author_string" final_id="COIMG-164" onclick="toggle_me()" style="cursor: pointer;">Yuhyun Ji, </span><span class="author_string" final_id="COIMG-164" onclick="toggle_me()" style="cursor: pointer;">Jungwoo Leem, and </span><span class="author_string" final_id="COIMG-164" onclick="toggle_me()" style="cursor: pointer;">Young L. 
Kim</span><span class="author_string" final_id="COIMG-164" onclick="toggle_me()" style="cursor: pointer;">, Purdue University (United States)</span><span class="abstract_link" final_id="COIMG-164" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="COIMG-164" id="abstract-COIMG-164" onclick="toggle_me()" style="display:none; cursor:pointer;">Here we introduce the idea of photospectroscopy, which can recover ambient spectral information from a photograph with a color reference chart taken by a smartphone camera. This color reference chart has an analogy with a hyperspectral filter array and provides the base information to recover the hidden spectral information in a photograph. Because the color reference chart contains an abundance of colors, the otherwise ill-posed problem of reconstructing spectral information from RGB values can be formulated as an overdetermined problem. We test different scenarios and samples using several smartphone models and light conditions to evaluate the performance of spectral recovery. The spectral range of the photospectroscopy method covers a broad range of visible light, determined by the spectral response functions in the R, G, and B channels of the smartphone camera (also known as the sensitivity function of the camera). In conclusion, the combination of the specially designed color reference chart and the robust recovery algorithm enables a new concept of photospectroscopy, supporting the idea that a photograph is more than merely an image and actually contains rich spectral information.</p> <p> </p> <br> <br> <p class="event_time">5:30 – 7:00 PM EI 2023 Symposium Interactive (Poster) Paper Session (in the Cyril Magnin Foyer)</p> <p class="event_time">5:30 – 7:00 PM EI 2023 Meet the Future: A Showcase of Student and Young Professionals Research (in the Cyril Magnin Foyer)</p> <div class="callout"> <div class="callout"> <p class="session_title">PANEL: Next Generation Imaging-on-a-Chip Tech-Mixer Discussion (W4)</p> <span class="chair">Hosts: Charles Bouman, Purdue University (United States) and Gregery Buzzard, Purdue University (United States)<br> </span><span class="chair">Panelists: Stanley Chan, Purdue University (United States); Eiichi Funatsu, OmniVision Technologies, Inc. (United States); Sergio Goma, Qualcomm Inc. (United States); Michael Polley, Samsung Research America (United States); and Anton Tremsin, University of California, Berkeley (United States)<br> </span><span class="session_time">6:00 – 7:00 PM</span> <br> <span class="room">Market Street </span> <br> <span></span> <p class="session_notes">The need to both increase imaging capabilities and reduce cost is driving extreme integration of sensing and processing. For example, in the future, analog sensors will be integrated with associated digital processing using methods such as 3D IC stacking. The function of this panel will be to facilitate discussions in the community on the future of imaging-on-a-chip solutions. What problems will these integrated imaging systems be uniquely suited to solve? How can the tight coupling of sensors and hardware be used to enhance capabilities and reduce cost? What should our community be doing to both enhance and exploit this emerging technology? 
Refreshments included: beer, wine, and snacks!</p> </div> </div> <br> <p class="date">Thursday 19 January 2023</p> <br> <br> <p class="session_title">Computational Imaging Topics (R1)</p> <span class="chair_label">Session Chair: </span> <span class="chair">Charles Bouman, Purdue University (United States)<br> </span> <span class="session_time">8:50 – 10:10 AM</span> <br> <span class="room">Market Street<br> </span> <p class="presentation_time" style="text-align:left;">8:50<a name="COIMG-165"></a><span style="float: right;">COIMG-165</span> <br> <span class="presentation_title" final_id="COIMG-165" onclick="toggle_me()" style="cursor: pointer;">Generative Adversarial Linear Discriminant Analysis (GALDA) for spectroscopy classification and imaging, </span><span class="author_string" final_id="COIMG-165" onclick="toggle_me()" style="cursor: pointer;">Ziyi Cao, </span><span class="author_string" final_id="COIMG-165" onclick="toggle_me()" style="cursor: pointer;">Shijie Zhang, </span><span class="author_string" final_id="COIMG-165" onclick="toggle_me()" style="cursor: pointer;">Youlin Liu, </span><span class="author_string" final_id="COIMG-165" onclick="toggle_me()" style="cursor: pointer;">Casey Smith, </span><span class="author_string" final_id="COIMG-165" onclick="toggle_me()" style="cursor: pointer;">Alex Sherman, and </span><span class="author_string" final_id="COIMG-165" onclick="toggle_me()" style="cursor: pointer;">Garth Simpson</span><span class="author_string" final_id="COIMG-165" onclick="toggle_me()" style="cursor: pointer;">, Purdue University (United States)</span><span class="abstract_link" final_id="COIMG-165" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="COIMG-165" id="abstract-COIMG-165" onclick="toggle_me()" style="display:none; cursor:pointer;">Generative adversarial linear discriminant analysis (GALDA) is formulated as a broadly applicable tool for increasing classification accuracy and reducing overfitting inherent in representative linear dimension reduction and classification algorithms. Although inspired by the successes of generative adversarial neural networks (GANs) for minimizing overfitting artifacts in artificial neural networks, GALDA was built around an independent linear algebra framework distinct from those in GANs. In contrast to feature extraction and data reduction approaches for minimizing overfitting, GALDA performs data augmentation by identifying and adversarially excluding the regions in spectral space in which genuine data do not reside. Classification accuracy was evaluated for GALDA together with other commonly available supervised and unsupervised methods for dimension reduction in simulated spectra generated using an open-source Raman database (Romanian Database of Raman Spectroscopy, RDRS). In one study, spectral analysis was performed for microscopy measurements of microspheroids of the blood thinner clopidogrel bisulfate. In another study, THz Raman images of common constituents in aspirin tablets were used to generate and compare chemically-selective composition maps. 
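</p>
<p>For orientation, the conventional baseline that such methods are benchmarked against is a plain LDA dimension-reduction and classification pipeline, sketched below on synthetic stand-in spectra; GALDA's adversarial augmentation itself is not reproduced here.</p>
<pre><code class="language-python">
import numpy as np
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis
from sklearn.model_selection import cross_val_score

# Synthetic stand-in for Raman spectra: 3 classes, 200 spectral channels.
rng = np.random.default_rng(0)
centers = rng.standard_normal((3, 200))
X = np.vstack([c + 0.5 * rng.standard_normal((40, 200)) for c in centers])
y = np.repeat([0, 1, 2], 40)

# With many channels and few spectra, plain LDA is prone to the overfitting
# that adversarial augmentation schemes like GALDA aim to suppress.
lda = LinearDiscriminantAnalysis(n_components=2)
print(cross_val_score(lda, X, y, cv=5).mean())
</code></pre>
<p class="abstract" final_id="COIMG-165" onclick="toggle_me()" style="display:none; cursor:pointer;">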
From these collective results, the potential scope of use for GALDA is critically evaluated relative to alternative established spectral dimension reduction and classification methods, and the GALDA-based approach is shown to increase spectroscopic imaging resolution in the THz Raman imaging case considered.</p> <p> </p> <p class="presentation_time" style="text-align:left;">9:10<a name="COIMG-166"></a><span style="float: right;">COIMG-166</span> <br> <span class="presentation_title" final_id="COIMG-166" onclick="toggle_me()" style="cursor: pointer;">Multi-agent consensus equilibrium (MACE) in electronic structure calculations, </span><span class="author_string" final_id="COIMG-166" onclick="toggle_me()" style="cursor: pointer;">Jiayue Rong, </span><span class="author_string" final_id="COIMG-166" onclick="toggle_me()" style="cursor: pointer;">Lyudmila Slipchenko, </span><span class="author_string" final_id="COIMG-166" onclick="toggle_me()" style="cursor: pointer;">Charles A. Bouman, </span><span class="author_string" final_id="COIMG-166" onclick="toggle_me()" style="cursor: pointer;">Gregery T. Buzzard, and </span><span class="author_string" final_id="COIMG-166" onclick="toggle_me()" style="cursor: pointer;">Garth Simpson</span><span class="author_string" final_id="COIMG-166" onclick="toggle_me()" style="cursor: pointer;">, Purdue University (United States)</span><span class="abstract_link" final_id="COIMG-166" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="COIMG-166" id="abstract-COIMG-166" onclick="toggle_me()" style="display:none; cursor:pointer;">A consensus equilibrium formalism is introduced for the integration of multiple quantum chemical calculations of electronic structure. In multi-agent consensus equilibrium, iterative updates in electronic structure optimization are intertwined, with the net output representing an equilibrium balance between multiple computational agents. MACE electronic structure calculations integrating multiple low-level methods (HF and M06) compared favorably for urea and tryptophan, with results evaluated against a higher-level electronic structure method (PBE) using an identical basis set (6-31G*). Notably, for the HOMO and LUMO molecular orbitals, MACE results differed substantially from the simple average of the independent computational agent outputs, with MACE yielding improved agreement with the higher-level PBE calculations. 
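</p>
<p>The equilibrium idea can be made concrete with the standard MACE fixed-point iteration; the quadratic proximal "agents" below are generic stand-ins for the electronic-structure agents, which are not specified here.</p>
<pre><code class="language-python">
import numpy as np

def mace(agents, mu, w0, rho=0.5, iters=200):
    """Generic MACE: find w* with F(w*) = G(w*), where F applies each agent to
    its own copy and G replaces every copy with the weighted consensus average."""
    w = np.array([w0.copy() for _ in agents])
    for _ in range(iters):
        fw = np.array([f(wi) for f, wi in zip(agents, w)])   # F(w)
        v = 2 * fw - w                                       # (2F - I) w
        vbar = np.tensordot(mu, v, axes=1)                   # consensus average
        w = (1 - rho) * w + rho * (2 * vbar - v)             # Mann step on (2G-I)(2F-I)
    return np.tensordot(mu, np.array([f(wi) for f, wi in zip(agents, w)]), axes=1)

def make_prox(target, s=1.0):
    """Proximal map of (s/2)*||x - target||^2, a stand-in computational agent."""
    return lambda x: (x + s * target) / (1 + s)

result = mace([make_prox(np.array([1.0, 0.0])), make_prox(np.array([0.0, 1.0]))],
              mu=np.array([0.5, 0.5]), w0=np.zeros(2))
print(result)   # ~[0.5, 0.5]: the equilibrium balances the two agents
</code></pre>
<p>The equilibrium is in general not the simple average of the agents' independent outputs, which is the distinction the abstract highlights for the HOMO and LUMO orbitals.</p>
<p class="abstract" final_id="COIMG-166" onclick="toggle_me()" style="display:none; cursor:pointer;">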
These results suggest potential promise for the use of MACE to improve accuracy of low-level electronic structure calculations through integration of multiple parallel methods.</p> <p> </p> <p class="presentation_time" style="text-align:left;">9:30<a name="COIMG-167"></a><span style="float: right;">COIMG-167</span> <br> <span class="presentation_title" final_id="COIMG-167" onclick="toggle_me()" style="cursor: pointer;">Instrumentation and software development for parts-per-million characterization of pharmaceutical crystal forms using AF-PTIR microscopy, </span><span class="author_string" final_id="COIMG-167" onclick="toggle_me()" style="cursor: pointer;">Aleksandr Razumtcev, </span><span class="author_string" final_id="COIMG-167" onclick="toggle_me()" style="cursor: pointer;">Minghe Li, and </span><span class="author_string" final_id="COIMG-167" onclick="toggle_me()" style="cursor: pointer;">Garth Simpson</span><span class="author_string" final_id="COIMG-167" onclick="toggle_me()" style="cursor: pointer;">, Purdue University (United States)</span><span class="abstract_link" final_id="COIMG-167" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="COIMG-167" id="abstract-COIMG-167" onclick="toggle_me()" style="display:none; cursor:pointer;">Instrumentation and algorithms to perform label-free chemically-specific imaging of an active pharmaceutical ingredient (API) crystal forms using autofluorescence-detected photothermal mid-IR (AF-PTIR) microscopy are demonstrated. Limits of detection (LoDs) of the most common analytical techniques that enable bulk analysis of crystal polymorphism are insufficient for the detection of trace polymorph impurities. Chemically-specific imaging methods, such as Raman microscopy, enable lowering the LoDs by orders of magnitude. Herein, a complementary label-free technique based on fluorescence detection of mid-IR photothermal effect is shown to achieve parts per million detection limits in discrimination between two crystal forms of indomethacin API. Crystal form assignment confidence was improved by implementing a spectral masking approach designed for a random-access quantum cascade laser (QCL) array. 
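</p>
<p>One way to picture the spectral-masking assignment step is a normalized correlation of each particle's photothermal spectrum against per-form reference masks; the wavenumber grid, band positions, and noise level below are invented for illustration and are not taken from the paper.</p>
<pre><code class="language-python">
import numpy as np

# Hypothetical QCL wavenumber grid and reference masks for two crystal forms
# of indomethacin (band positions are illustrative only).
wn = np.linspace(1100, 1700, 64)                  # wavenumbers, cm^-1
form_alpha = np.exp(-(((wn - 1230) / 20) ** 2))
form_gamma = np.exp(-(((wn - 1310) / 20) ** 2))
masks = np.stack([form_alpha, form_gamma])
masks /= np.linalg.norm(masks, axis=1, keepdims=True)

def assign_form(measured):
    """Assign a per-particle spectrum to the best-matching reference mask."""
    m = measured / np.linalg.norm(measured)
    return int(np.argmax(masks @ m))              # index of highest correlation

rng = np.random.default_rng(2)
noisy = form_gamma + 0.3 * rng.standard_normal(wn.size)
print(assign_form(noisy))                         # -> 1 (gamma) for most noise draws
</code></pre>
<p class="abstract" final_id="COIMG-167" onclick="toggle_me()" style="display:none; cursor:pointer;">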
9:50 COIMG-168
Multivariate curve resolution with autoencoders for CARS microspectroscopy, Damien Boildieu¹,², David Helbert², Amandine Magnaudeix³, Philippe Leproux¹, and Philippe Carré²; ¹XLIM, UMR CNRS 7252, University of Limoges, ²XLIM, UMR CNRS 7252, University of Poitiers, and ³IRCER, UMR CNRS 7315, University of Limoges (France)
Abstract: Coherent anti-Stokes Raman scattering (CARS) microspectroscopy is a powerful tool for label-free cell imaging thanks to its ability to acquire a rich amount of information. An important family of operations applied to such data is multivariate curve resolution (MCR), which aims to find the main components of a dataset and to compute their spectra and concentrations at each pixel. Recently, autoencoders, both dense and convolutional, have begun to be studied as a means of performing MCR. However, many questions, such as result variability and the choice of reconstruction metric, remain open, and applications have been limited to hyperspectral imaging. In this article, we present a nonlinear convolutional encoder combined with a linear decoder to apply MCR to CARS microspectroscopy. We conclude with a study of the result variability induced by the encoder initialization.
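As a rough illustration of the architecture described above (not the authors' exact network; the layer sizes, component count K, and spectral length L are assumptions), the following PyTorch sketch pairs a nonlinear convolutional encoder, which outputs nonnegative concentrations, with a linear decoder whose weight matrix holds the learned component spectra:

```python
import torch
import torch.nn as nn

K, L = 4, 256          # assumed: K components, L spectral channels

class MCRAutoencoder(nn.Module):
    def __init__(self):
        super().__init__()
        # Nonlinear convolutional encoder: spectrum -> K nonnegative concentrations.
        self.encoder = nn.Sequential(
            nn.Conv1d(1, 16, kernel_size=7, padding=3), nn.ReLU(),
            nn.Conv1d(16, 16, kernel_size=7, padding=3), nn.ReLU(),
            nn.AdaptiveAvgPool1d(1), nn.Flatten(),
            nn.Linear(16, K), nn.Softplus(),
        )
        # Linear decoder: reconstructs each spectrum as a mixture of K
        # component spectra stored in the weight matrix.
        self.decoder = nn.Linear(K, L, bias=False)

    def forward(self, x):                  # x: (batch, 1, L)
        c = self.encoder(x)                # concentrations, (batch, K)
        return self.decoder(c), c

model = MCRAutoencoder()
spectra = torch.rand(32, 1, L)             # placeholder CARS spectra
recon, conc = model(spectra)
loss = nn.functional.mse_loss(recon, spectra.squeeze(1))
loss.backward()
components = model.decoder.weight.detach().t()   # (K, L) learned component spectra
print(recon.shape, conc.shape, components.shape)
```

The linearity of the decoder is what gives the latent codes an MCR interpretation: reconstruction is an explicit mixture of component spectra rather than an arbitrary nonlinear map.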
10:20 – 10:50 AM Coffee Break

Computational Imaging Topics (R2)
Session Chair: Charles Bouman, Purdue University (United States)
10:50 AM – 12:30 PM
Market Street

10:50 COIMG-169
BowTie Rasterization for extreme synthetic radiance image rendering, Thomas L. Burnett, Justin Halter, and Justin Jensen, FoVI3D (United States)
Abstract: The light-field display (LfD) radiance image is a raster description of a light field in which every pixel represents a unique ray within a 3D volume. The radiance image can be projected through an array of micro-lenses to form a perspective-correct 3D aerial image visible to all viewers within the LfD's projection frustum. The synthetic LfD radiance image is comparable to the radiance image captured by a plenoptic/light-field camera but is rendered from a 3D model or scene. Synthetic radiance image rasterization is an example of extreme multi-view rendering: the 3D scene must be rendered from many viewpoints (thousands to millions) into small viewports for each update of the display. However, GPUs and their accompanying APIs (OpenGL, DirectX, Vulkan) generally expect to render a 3D scene from one viewpoint into a single large viewport/framebuffer, so LfD radiance image rendering is extremely time-consuming and compute-intensive. This paper reviews the novel, full-parallax BowTie radiance image rasterization algorithm, which can be embedded within an LfD to accelerate radiance image rendering for real-time update.

11:10 COIMG-170
Automatic parameter tuning for plug-and-play algorithms using generalized cross validation and Stein's unbiased risk estimation for linear inverse problems in computational imaging, Canberk Ekmekci and Mujdat Cetin, University of Rochester (United States)
Abstract: We propose an automatic parameter tuning method for plug-and-play (PnP) algorithms that use CNN denoisers. We focus on linear inverse problems and propose an iterative algorithm to calculate the generalized cross-validation (GCV) and Stein's unbiased risk estimator (SURE) functions for a half-quadratic-splitting-based PnP algorithm that uses a state-of-the-art CNN denoiser. The proposed method leverages automatic differentiation to compute the exact Jacobian-vector products appearing in the update equations. The parameters can then be tuned automatically by minimizing the GCV and SURE functions. Because linear inverse problems appear frequently in computational imaging, the proposed method can be applied to a wide range of problems. Furthermore, because the proposed method relies on the GCV and SURE functions, it requires neither access to the ground-truth image nor a training dataset, which is highly desirable for imaging applications in which acquiring data is costly and time-consuming. We evaluate the proposed parameter tuning strategy on a preliminary deblurring experiment and show that its reconstruction performance is comparable to that of the optimal tuning algorithm, which adjusts the parameters by maximizing the structural similarity index between the ground-truth image and the output of the PnP algorithm.
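To make the SURE side of this tuning strategy concrete, here is a hedged sketch for the simplest case, pure denoising: the divergence term is estimated by a Monte-Carlo (Hutchinson) trace using automatic-differentiation Jacobian-vector products, with soft-thresholding standing in for the CNN denoiser and all problem sizes invented. The paper's iterative algorithm for general linear inverse problems, and its GCV counterpart, are not reproduced here:

```python
import torch

# Monte-Carlo SURE for a denoiser parameter, via autodiff Jacobian-vector
# products. SURE(lam) = ||f(y) - y||^2 / N - sigma^2 + (2 sigma^2 / N) div f(y).
torch.manual_seed(0)
N, sigma = 1024, 0.1
x_true = torch.zeros(N); x_true[::32] = 1.0        # sparse ground truth
y = x_true + sigma * torch.randn(N)                # noisy observation

def denoise(z, lam):                               # stand-in for the CNN denoiser
    return torch.sign(z) * torch.clamp(z.abs() - lam, min=0.0)

def sure(lam, n_probe=8):
    xhat = denoise(y, lam)
    div = 0.0                                      # Hutchinson divergence estimate
    for _ in range(n_probe):
        b = torch.randn(N)
        _, jvp = torch.autograd.functional.jvp(lambda z: denoise(z, lam), (y,), (b,))
        div = div + torch.dot(b, jvp) / n_probe
    return ((xhat - y) ** 2).mean() - sigma ** 2 + 2.0 * sigma ** 2 * div / N

lams = torch.linspace(0.01, 0.5, 20)
risks = torch.tensor([float(sure(l)) for l in lams])
print("lambda minimizing SURE:", float(lams[risks.argmin()]))
```

Note that the ground truth is used only to synthesize data; the SURE criterion itself touches only the noisy observation, which is the property the abstract highlights.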
11:30 COIMG-171
Ultrasound elasticity reconstruction with inaccurate forward model using integrated data-driven correction of data fidelity gradient, Narges Mohammadi, Marvin M. Doyley, and Mujdat Cetin, University of Rochester (United States)
Abstract: Ultrasound elasticity images, which enable the visualization of quantitative maps of tissue stiffness, can be reconstructed by solving an inverse problem. Classical model-based approaches to ultrasound elastography use deterministic finite element methods (FEMs) to incorporate the governing physical laws, leading to poor performance in low-SNR conditions. Moreover, these approaches rely on approximate linear forward models, discretized by FEMs, to describe the underlying physics governed by partial differential equations (PDEs). To achieve highly accurate stiffness images, it is essential to compensate for the errors induced by noisy measurements and inaccurate forward models. To this end, we propose a joint model-based and learning-based framework that estimates the elasticity distribution by solving a regularized optimization problem. To address noise, we introduce a statistical representation of the imaging system that incorporates the noise statistics as a signal-dependent correlated noise model. To compensate for model errors, we introduce an explicit data-driven correction model that can be integrated with any regularization term. The resulting constrained optimization problem is solved by fixed-point gradient descent, in which the analytical gradient of the inaccurate data-fidelity term is corrected by a neural network, while regularization is provided by data-driven unrolled regularization by denoising (RED). Both networks are jointly trained in an end-to-end manner.
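The corrected-gradient iteration can be sketched schematically as follows. Everything here is a placeholder: the forward model is a random matrix rather than an FEM discretization, the correction network is left untrained (the paper trains it end-to-end together with the RED denoiser), and the "denoiser" is a trivial shrinkage:

```python
import torch
import torch.nn as nn

# Schematic fixed-point gradient descent with a learned correction applied to
# the analytic data-fidelity gradient, plus a RED-style regularization term.
torch.manual_seed(0)
n = 64
A = torch.randn(n, n) / n ** 0.5                 # inaccurate linear forward model
x_true = torch.rand(n)
y = A @ x_true + 0.01 * torch.randn(n)           # noisy measurements

correction = nn.Sequential(nn.Linear(n, n), nn.Tanh(), nn.Linear(n, n))
denoiser = lambda z: 0.9 * z                     # trivial stand-in denoiser

x = torch.zeros(n)
step, tau = 0.1, 0.1
with torch.no_grad():                            # inference-style iteration
    for _ in range(200):
        grad_fid = A.T @ (A @ x - y)             # analytic (model-based) gradient
        grad_fid = grad_fid + correction(grad_fid)   # learned data-driven correction
        grad_red = tau * (x - denoiser(x))       # RED regularization gradient
        x = x - step * (grad_fid + grad_red)

print("data residual:", float(torch.norm(A @ x - y)))
```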
11:50 COIMG-172
A globally optimal fast-iterative linear maximum likelihood classifier, Prasanna Reddy Pulakurthi¹, Sohail A. Dianat¹, Majid Rabbani¹, Suya You², and Raghuveer M. Rao²; ¹Rochester Institute of Technology and ²DEVCOM Army Research Laboratory (United States)
Abstract: A novel iterative linear classification algorithm is developed from a maximum likelihood (ML) linear classifier. The main contribution of this paper is the discovery that a well-known ML linear classifier is the solution of a contraction mapping for an acceptable range of values of the relaxation parameter. Hence, a novel iterative scheme is proposed that converges to a fixed point, the globally optimal solution. To the best of our knowledge, this formulation has not been reported before. Furthermore, the proposed iterative solution converges to the fixed point at a rate faster than traditional gradient-based techniques. Further variations of the iterative algorithm are presented to improve the constraint on the relaxation parameter. Finally, the performance of the proposed iterative solution is compared to conventional gradient descent methods on linearly and nonlinearly separable data, in terms of both convergence speed and overall classification performance.
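The contraction-mapping idea can be illustrated on the classical two-class Gaussian ML discriminant, whose direction solves S w = μ₁ − μ₀: the relaxed map T(w) = w − α(S w − Δμ) is a contraction for 0 < α < 2/λ_max(S) and iterates to the unique fixed point S⁻¹Δμ. This is a generic construction for illustration only; the paper's specific mapping and its admissible relaxation range may differ:

```python
import numpy as np

# Fixed-point iteration for the ML linear discriminant direction w = S^{-1} dmu,
# written as a contraction map with relaxation parameter alpha.
rng = np.random.default_rng(0)
d = 5
X0 = rng.normal(0.0, 1.0, (500, d))              # class 0 samples
X1 = rng.normal(1.0, 1.0, (500, d))              # class 1 samples
S = 0.5 * (np.cov(X0.T) + np.cov(X1.T))          # pooled covariance estimate
dmu = X1.mean(axis=0) - X0.mean(axis=0)

lmax = np.linalg.eigvalsh(S).max()
alpha = 1.0 / lmax                               # inside the contraction range (0, 2/lmax)

w = np.zeros(d)
for k in range(200):
    w_next = w - alpha * (S @ w - dmu)           # T(w): contraction toward S^{-1} dmu
    if np.linalg.norm(w_next - w) < 1e-10:
        break
    w = w_next

print("iterations:", k,
      "max error vs direct solve:", np.abs(w - np.linalg.solve(S, dmu)).max())
```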
12:10 COIMG-173
Multimodal contrastive learning for unsupervised video representation learning, Anup Hiremath and Avideh Zakhor, University of California, Berkeley (United States)
Abstract: In this paper, we propose a multimodal unsupervised video learning algorithm designed to incorporate information from any number of modalities present in the data. We cooperatively train one network per modality: at each stage of training, one of these networks is selected and trained using the outputs of the other networks. To verify the algorithm, we train a model using RGB, optical flow, and audio. We then evaluate the effectiveness of the unsupervised model by performing action classification and nearest-neighbor retrieval on a supervised dataset. Compared with contrastive learning models using only RGB and optical flow, the triple-modality model provides a 1.5% improvement in UCF101 classification accuracy, a 1.4% improvement in R@1 retrieval recall, a 3.5% improvement in R@5 retrieval recall, and a 2.4% improvement in R@10 retrieval recall, demonstrating the merit of using as many modalities as possible in a cooperative learning model.
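A schematic of the cooperative scheme described above, with tiny placeholder encoders and invented feature sizes: the trained network rotates across modalities, and each step applies one InfoNCE loss per frozen partner modality:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

# Cooperative multimodal contrastive training: update one modality's encoder
# to agree with the frozen embeddings of the other modalities for the same clip.
dims = {"rgb": 512, "flow": 256, "audio": 128}     # placeholder feature sizes
emb = 64
encoders = {m: nn.Sequential(nn.Linear(d, 128), nn.ReLU(), nn.Linear(128, emb))
            for m, d in dims.items()}

def info_nce(z_a, z_b, tau=0.1):
    z_a, z_b = F.normalize(z_a, dim=1), F.normalize(z_b, dim=1)
    logits = z_a @ z_b.t() / tau                   # (B, B) similarity matrix
    labels = torch.arange(z_a.shape[0])            # positives on the diagonal
    return F.cross_entropy(logits, labels)

batch = {m: torch.randn(16, d) for m, d in dims.items()}   # fake clip features
for target in dims:                                # rotate the trained network
    opt = torch.optim.SGD(encoders[target].parameters(), lr=1e-2)
    z_t = encoders[target](batch[target])
    loss = 0.0
    for other in dims:
        if other == target:
            continue
        with torch.no_grad():                      # other networks held fixed
            z_o = encoders[other](batch[other])
        loss = loss + info_nce(z_t, z_o)
    opt.zero_grad(); loss.backward(); opt.step()
    print(target, float(loss))
```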
12:30 – 2:00 PM Lunch

Computational Imaging Topics (R3)
Session Chair: Charles Bouman, Purdue University (United States)
2:00 – 2:40 PM
Market Street

2:00 COIMG-174
Hyperspectral learning for mHealth hemodynamic imaging, Yuhyun Ji, Sang Mok Park, Vidhya V. Nair, Yunjie Tong, and Young L. Kim, Purdue University (United States)
Abstract: Hyperspectral imaging offers both spectral and spatial information, which is advantageous for understanding biological and physical signals. However, conventional hyperspectral imaging systems have intrinsic limitations, including bulky instrumentation, costly optical components, slow data acquisition, and insufficient spectral resolution. Spectral learning, a mapping from a sparse spectral space (RGB values) to a dense spectral space, can potentially overcome these limitations. However, hyperspectral image recovery from an RGB image is an ill-posed problem that ordinarily requires a large amount of training data, which hampers practical applications. Here we introduce a machine learning approach in which locally sampled hyperspectral data inform the training of a hyperspectral learning algorithm. The trained algorithm can recover a hypercube from an RGB image without complete hyperspectral scanning of the entire area. We apply learning-based hyperspectral imaging to vascular developmental models and to peripheral microcirculation in humans to analyze and visualize dynamic changes in oxygen saturation and hemoglobin content. The key advantages of learning-based hyperspectral imaging include the hardware simplicity of conventional cameras, high temporal resolution when video is used, no trade-off between spatial and spectral resolution, and rich spectral information suitable for a variety of machine learning methods.
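The spectral-learning mapping itself reduces to a per-pixel regression from RGB to a dense spectrum. A hedged sketch with synthetic data follows; the band count, network size, and linear mixing model are invented, and the paper's approach of training on locally sampled hyperspectral measurements is only mimicked by the small training set:

```python
import torch
import torch.nn as nn

# Train a small per-pixel RGB -> spectrum regressor on "locally sampled"
# hyperspectral pixels, then apply it to a full RGB frame to recover a hypercube.
torch.manual_seed(0)
n_bands = 100                                     # assumed spectral bands
train_rgb = torch.rand(2000, 3)                   # locally sampled pixels
basis = torch.rand(3, n_bands)                    # synthetic RGB -> spectrum mixing
train_spec = train_rgb @ basis + 0.01 * torch.randn(2000, n_bands)

net = nn.Sequential(nn.Linear(3, 64), nn.ReLU(), nn.Linear(64, n_bands))
opt = torch.optim.Adam(net.parameters(), lr=1e-3)
for _ in range(500):
    opt.zero_grad()
    loss = nn.functional.mse_loss(net(train_rgb), train_spec)
    loss.backward(); opt.step()

rgb_image = torch.rand(128, 128, 3)               # full RGB frame (no HS scan)
with torch.no_grad():
    hypercube = net(rgb_image.reshape(-1, 3)).reshape(128, 128, n_bands)
print(hypercube.shape, float(loss))
```

Because the regression runs independently per pixel, the spatial resolution of the recovered hypercube is simply that of the RGB camera, which is the "no trade-off" property claimed in the abstract.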
2:20 COIMG-175
Deep learning based image registration for 3D magnetic imaging at nanoscale, Srutarshi Banerjee, Junjing Deng, Joerg Strempfer, and Doga Gursoy, Argonne National Laboratory Advanced Photon Source (United States)
Abstract: The availability of highly coherent X-ray flux with enhanced control of X-ray polarization at synchrotron facilities such as the Advanced Photon Source at Argonne National Laboratory provides the basic resources for dichroic ptycho-tomographic imaging of the magnetic and electronic domains of magnetic materials at the nanoscale. As the resolution is pushed toward the nanoscale, imperfections associated with the stability of the beam and setup lead to jitter in the data acquired during ptycho-tomographic scans. These imperfections may arise from relative motion between the beam, sample, and detector in the interferometric hardware setup. Since dichroic ptycho-tomographic scans must be performed at many rotation angles and for at least two tilted rotation axes, precise registration of the acquired data between projections is also critical; without proper image registration, the 3D reconstruction will be inaccurate. In this work, we present a deep learning based image registration process that addresses the misalignments in the experimental setup, following a non-rigid registration approach in a deep learning framework. The algorithm is developed using synthetic and experimental data. Our approach provides a fast and effective model for image registration and is readily applicable to numerous other applications.
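As a generic illustration of non-rigid registration in a deep learning framework (a VoxelMorph-style construction, not necessarily the authors' network; image sizes and the loss weighting are invented), a small CNN can predict a dense displacement field that warps one projection onto another, trained with an intensity-mismatch loss plus a smoothness penalty:

```python
import torch
import torch.nn as nn
import torch.nn.functional as F

# Non-rigid registration sketch: a CNN maps (fixed, moving) to a 2-channel
# displacement field; grid_sample warps the moving image accordingly.
torch.manual_seed(0)
H = W = 64
fixed = torch.rand(1, 1, H, W)
moving = torch.roll(fixed, shifts=(2, -3), dims=(2, 3))    # misaligned copy

net = nn.Sequential(
    nn.Conv2d(2, 16, 3, padding=1), nn.ReLU(),
    nn.Conv2d(16, 2, 3, padding=1),                        # flow field (dx, dy)
)

ys, xs = torch.meshgrid(torch.linspace(-1, 1, H),
                        torch.linspace(-1, 1, W), indexing="ij")
base_grid = torch.stack((xs, ys), dim=-1).unsqueeze(0)     # identity sampling grid

opt = torch.optim.Adam(net.parameters(), lr=1e-3)
for _ in range(200):
    flow = net(torch.cat([fixed, moving], dim=1))          # (1, 2, H, W)
    grid = base_grid + flow.permute(0, 2, 3, 1)            # displace the grid
    warped = F.grid_sample(moving, grid, align_corners=True)
    smooth = flow.diff(dim=2).abs().mean() + flow.diff(dim=3).abs().mean()
    loss = F.mse_loss(warped, fixed) + 0.01 * smooth       # mismatch + smoothness
    opt.zero_grad(); loss.backward(); opt.step()

print("final mismatch:", float(F.mse_loss(warped, fixed)))
```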
id="ste_container_NewContentHtml2" class="ContentItemContainer"><div class="footer-nav"> <div class="footer-col"> <ul> <li><a href="https://www.imaging.org/">IMAGING.ORG</a></li> <li><a href="/IST/Conferences/Events_Overview.aspx">Events</a></li> <li><a href="/IST/Publications/Publications_Overview.aspx" class="">Publications</a></li> <li><a href="/IST/Standards/TC42.aspx">Standards</a></li> </ul> </div> <div class="footer-col"> <ul> <li><a href="/IST/Resources/Resources_Home.aspx">RESOURCES</a></li> <li><a href="/IST/Resources/CareerCenter.aspx">Careers</a></li> <li><a href="/IST/Policies/Policies.aspx">Policies</a></li> </ul> </div> <div class="footer-col"> <ul> <li><a href="/IST/About/About.aspx">ABOUT US</a></li> <li><a href="/IST/Membership/Individual_Membership.aspx">Membership</a></li> <li><a href="/IST/About/Donations.aspx">Donate</a></li> <li><a href="/IST/About/About.aspx">Contact</a></li> </ul> </div> </div></div><div id="ste_container_FooterContent" class="ContentItemContainer"><div class="FooterTop"><div class="FooterLogo"><img src="/images/75th%20logo%20alt%20blue%20white%20bkgrnd.png" alt=""> </div> <div class="FooterSocial"> <div class="FooterSocialText"> <p>Stay Connected!</p> </div> <div class="FooterSocialImg"><a href="https://www.linkedin.com/company/society-for-imaging-science-and-technology-is&t-"><img src="/images/Icons/linkedin36blue.png" alt="" style="margin-right: 10px;"></a><a href="https://twitter.com/ImagingOrg"><img src="/images/Icons/twitter36blue.png" alt="" style=""></a></div> </div></div></div><div id="ste_container_NewContentHtml1" class="ContentItemContainer"><div class="FooterBottom"><p style="text-align: center;">© Copyright 2023 Society for Imaging Sciences and Technology. All Rights Reserved.</p></div></div><div class="ContentRecordPageButtonPanel"> </div> </div> </div> </div> </div> <div class="footer-nav-copyright"> <div class="container" role="navigation"> <div class="footer-copyright" data-label= "Footer 2"> <div ID="WTZone10_Page1" class="WTZone iPartsDisplayInlineBlock"> </div> </div> </div> </div> </footer> </div> <!--Jscript from Page.ResgisterStartupScript extention is loaded here --> <Div><script type="text/javascript">Sys.Application.add_load(function () {{ MasterPageBase_Init(); }});</script> <script type="text/javascript">Sys.Application.add_load(function() { { BreadCrumb_load('80409b89-ae6d-45a9-a9d4-96d522ff2047'); } }); </script> </Div><input name="ctl01$TemplateScripts$timeoutsoonmsg" type="hidden" id="timeoutsoonmsg" value="PGgyPllvdSBhcmUgYWJvdXQgdG8gYmUgc2lnbmVkIG91dDwvaDI+DQo8cD5Zb3Ugd2lsbCBiZSBzaWduZWQgb3V0IGluIDxzdHJvbmc+W1NlY29uZHNSZW1haW5pbmddPC9zdHJvbmc+IHNlY29uZHMgZHVlIHRvIGluYWN0aXZpdHkuIFlvdXIgY2hhbmdlcyB3aWxsIG5vdCBiZSBzYXZlZC4gVG8gY29udGludWUgd29ya2luZyBvbiB0aGUgd2Vic2l0ZSwgY2xpY2sgIlN0YXkgU2lnbmVkIEluIiBiZWxvdy48L3A+" /><input name="ctl01$TemplateScripts$timeoutsoonstaysignintxt" type="hidden" id="timeoutsoonstaysignintxt" value="U3RheSBTaWduZWQgSW4=" /><input name="ctl01$TemplateScripts$timeoutsoonlogouttxt" type="hidden" id="timeoutsoonlogouttxt" value="U2lnbiBPdXQ=" /><input name="ctl01$TemplateScripts$stayLoggedInURL" type="hidden" id="stayLoggedInURL" /><input name="ctl01$TemplateScripts$logoutUrl" type="hidden" id="logoutUrl" value="aHR0cHM6Ly93d3cuaW1hZ2luZy5vcmcvYXNpY29tbW9uL2NvbnRyb2xzL3NoYXJlZC9mb3Jtc2F1dGhlbnRpY2F0aW9uL2xvZ2luLmFzcHg/U2Vzc2lvblRpbWVvdXQ9MSZSZXR1cm5Vcmw9JTJmSVNUJTJmSVNUJTJmQ29uZmVyZW5jZXMlMmZFSSUyZkVJMjAyMyUyZkNvbmZlcmVuY2UlMmZDX0NPSU1HLmFzcHglM2Y=" /> <!-- Bootstrap Modal --> <div 
id="BootstrapModal" class="modal fade" tabindex="-1" role="dialog" aria-label="Modal" aria-hidden="true"> <div id="BootstrapDocument" class="modal-dialog modal-xl" role="document"> <div class="modal-content"> <div class="modal-header"> <button type="button" class="close" data-dismiss="modal" aria-label="Close"> <span aria-hidden="true">×</span> </button> </div> <div class="modal-body p-0 m-0"> <iframe id="ContentFrame" class="modal-content-iframe" width="100%" height="100px" frameborder="0"></iframe> </div> </div> </div> </div> <div id="ctl01_RadAjaxManager1SU"> <span id="ctl01_RadAjaxManager1" style="display:none;"></span> </div><div id="ctl01_WindowManager1" style="display:none;"> <div id="ctl01_GenericWindow" style="display:none;"> <div id="ctl01_GenericWindow_C" style="display:none;"> </div><input id="ctl01_GenericWindow_ClientState" name="ctl01_GenericWindow_ClientState" type="hidden" /> </div><div id="ctl01_ObjectBrowser" style="display:none;"> <div id="ctl01_ObjectBrowser_C" style="display:none;"> </div><input id="ctl01_ObjectBrowser_ClientState" name="ctl01_ObjectBrowser_ClientState" type="hidden" /> </div><div id="ctl01_ObjectBrowserDialog" style="display:none;"> <div id="ctl01_ObjectBrowserDialog_C" style="display:none;"> </div><input id="ctl01_ObjectBrowserDialog_ClientState" name="ctl01_ObjectBrowserDialog_ClientState" type="hidden" /> </div><div id="ctl01_WindowManager1_alerttemplate" style="display:none;"> <div class="rwDialogPopup radalert"> <div class="rwDialogText"> {1} </div> <div> <a onclick="$find('{0}').close(true);" class="rwPopupButton" href="javascript:void(0);"> <span class="rwOuterSpan"> <span class="rwInnerSpan">##LOC[OK]##</span> </span> </a> </div> </div> </div><div id="ctl01_WindowManager1_prompttemplate" style="display:none;"> <div class="rwDialogPopup radprompt"> <div class="rwDialogText"> {1} </div> <div> <script type="text/javascript"> function RadWindowprompt_detectenter(id, ev, input) { if (!ev) ev = window.event; if (ev.keyCode == 13) { var but = input.parentNode.parentNode.getElementsByTagName("A")[0]; if (but) { if (but.click) but.click(); else if (but.onclick) { but.focus(); var click = but.onclick; but.onclick = null; if (click) click.call(but); } } return false; } else return true; } </script> <input title="Enter Value" onkeydown="return RadWindowprompt_detectenter('{0}', event, this);" type="text" class="rwDialogInput" value="{2}" /> </div> <div> <a onclick="$find('{0}').close(this.parentNode.parentNode.getElementsByTagName('input')[0].value);" class="rwPopupButton" href="javascript:void(0);" ><span class="rwOuterSpan"><span class="rwInnerSpan">##LOC[OK]##</span></span></a> <a onclick="$find('{0}').close(null);" class="rwPopupButton" href="javascript:void(0);"><span class="rwOuterSpan"><span class="rwInnerSpan">##LOC[Cancel]##</span></span></a> </div> </div> </div><div id="ctl01_WindowManager1_confirmtemplate" style="display:none;"> <div class="rwDialogPopup radconfirm"> <div class="rwDialogText"> {1} </div> <div> <a onclick="$find('{0}').close(true);" class="rwPopupButton" href="javascript:void(0);" ><span class="rwOuterSpan"><span class="rwInnerSpan">##LOC[OK]##</span></span></a> <a onclick="$find('{0}').close(false);" class="rwPopupButton" href="javascript:void(0);"><span class="rwOuterSpan"><span class="rwInnerSpan">##LOC[Cancel]##</span></span></a> </div> </div> </div><input id="ctl01_WindowManager1_ClientState" name="ctl01_WindowManager1_ClientState" type="hidden" /> </div> <script type="text/javascript"> //<![CDATA[ var gCartCount; var 
cartDiv = $get("CartItemCount"); if (cartDiv != null){ jQuery.ajax({ type: "POST", url: gWebRoot + "/WebMethodUtilities.aspx/GetCartItemCount", data: "{}", contentType: "application/json; charset=utf-8", dataType: 'json', success: function(result) { if (result.d != '' && result.d != null) { gCartCount = result.d; if (gCartCount != null) { cartDiv.innerHTML = gCartCount; } } }, async: true }); } function CheckForChildren() { var contentRecordPageButtonPanelHasChildren = false; var contentRecordPageButtonPanel = jQuery('div.ContentRecordPageButtonPanel'); for (var i = 0, max = contentRecordPageButtonPanel.length; i < max; i++) { if (contentRecordPageButtonPanel[i].children.length > 0) { contentRecordPageButtonPanelHasChildren = true; break; } } if (!contentRecordPageButtonPanelHasChildren) { jQuery("Body").addClass("TemplateAreaEasyEditOn"); } } if (gIsEasyEditEnabled) CheckForChildren(); //]]> </script> <div class="aspNetHidden"> <input type="hidden" name="__VIEWSTATEGENERATOR" id="__VIEWSTATEGENERATOR" value="6DD74C40" /> </div> <script type="text/javascript"> //<![CDATA[ if(typeof(window['ctl01_TemplateBody_ContentPage1_contentitemdisplaybasejsmanager'])==='undefined') { window['ctl01_TemplateBody_ContentPage1_contentitemdisplaybasejsmanager']=new Asi_WebRoot_AsiCommon_ContentManagement_DownloadDocument(); }if(typeof(window['ctl01_TemplateBody_ContentPage1_contentitemdisplaybasejsmanager'])!=='undefined') { window['ctl01_TemplateBody_ContentPage1_contentitemdisplaybasejsmanager'].OnLoad('#ctl01_TemplateBody_ContentPage1_downloadButton','#ctl01_TemplateBody_ContentPage1_HiddenDownloadPathField'); }if(typeof(window['ctl01_TemplateBody_ContentPage2_contentitemdisplaybasejsmanager'])==='undefined') { window['ctl01_TemplateBody_ContentPage2_contentitemdisplaybasejsmanager']=new Asi_WebRoot_AsiCommon_ContentManagement_DownloadDocument(); }if(typeof(window['ctl01_TemplateBody_ContentPage2_contentitemdisplaybasejsmanager'])!=='undefined') { window['ctl01_TemplateBody_ContentPage2_contentitemdisplaybasejsmanager'].OnLoad('#ctl01_TemplateBody_ContentPage2_downloadButton','#ctl01_TemplateBody_ContentPage2_HiddenDownloadPathField'); }if(typeof(window['ctl01_TemplateBody_WebPartManager1_gwpciCornerArt_ciCornerArt_contentitemdisplaybasejsmanager'])==='undefined') { window['ctl01_TemplateBody_WebPartManager1_gwpciCornerArt_ciCornerArt_contentitemdisplaybasejsmanager']=new Asi_WebRoot_AsiCommon_ContentManagement_DownloadDocument(); }if(typeof(window['ctl01_TemplateBody_WebPartManager1_gwpciCornerArt_ciCornerArt_contentitemdisplaybasejsmanager'])!=='undefined') { window['ctl01_TemplateBody_WebPartManager1_gwpciCornerArt_ciCornerArt_contentitemdisplaybasejsmanager'].OnLoad('#ctl01_TemplateBody_WebPartManager1_gwpciCornerArt_ciCornerArt_downloadButton','#ctl01_TemplateBody_WebPartManager1_gwpciCornerArt_ciCornerArt_HiddenDownloadPathField'); }__Document_Head_Init('https://www.imaging.org/NoCookies.html', '', false);window.__TsmHiddenField = $get('ctl01_ScriptManager1_TSM');NavigationList_NavControlId = '_rptWrapper';NavigationList_Init();var ctl01_ciNewUtilityNavigationCommon2_ctl05_SearchTermsProperties = new SimpleSearchFieldProperties(); ctl01_ciNewUtilityNavigationCommon2_ctl05_SearchTermsProperties.WatermarkClass = 'Watermarked'; ctl01_ciNewUtilityNavigationCommon2_ctl05_SearchTermsProperties.WatermarkText = 'Keyword search'; ctl01_ciNewUtilityNavigationCommon2_ctl05_SearchTermsProperties.SearchTarget = 'https://www.imaging.org/Search'; var 
ctl01_ciNewUtilityNavigationCommon2_ctl08_SearchTermsProperties = new SimpleSearchFieldProperties(); ctl01_ciNewUtilityNavigationCommon2_ctl08_SearchTermsProperties.WatermarkClass = 'Watermarked'; ctl01_ciNewUtilityNavigationCommon2_ctl08_SearchTermsProperties.WatermarkText = 'Keyword search'; ctl01_ciNewUtilityNavigationCommon2_ctl08_SearchTermsProperties.SearchTarget = 'https://www.imaging.org/Search'; NavigationList_NavControlId = 'ctl01_ciPrimaryNavigation_NavControl_NavMenu';NavigationList_Init();PageNavR_NavMenuClientID = 'ctl01_ciPrimaryNavigation_NavControl_NavMenu';var __wpmExportWarning='This Web Part Page has been personalized. As a result, one or more Web Part properties may contain confidential information. Make sure the properties contain information that is safe for others to read. After exporting this Web Part, view properties in the Web Part description file (.WebPart) by using a text editor such as Microsoft Notepad.';var __wpmCloseProviderWarning='You are about to close this Web Part. It is currently providing data to other Web Parts, and these connections will be deleted if this Web Part is closed. To close this Web Part, click OK. To keep this Web Part, click Cancel.';var __wpmDeleteWarning='You are about to permanently delete this Web Part. Are you sure you want to do this? To delete this Web Part, click OK. To keep this Web Part, click Cancel.';__wpm = new WebPartManager(); __wpm.overlayContainerElement = document.getElementById('ctl01_TemplateBody_WebPartManager1___Drag'); __wpm.personalizationScopeShared = false; var zoneElement; var zoneObject; zoneElement = document.getElementById('ctl01_TemplateBody_ContentPage1_WebPartZone1_Page1');if (zoneElement != null) {zoneObject = __wpm.AddZone(zoneElement, 'ctl01$TemplateBody$ContentPage1$WebPartZone1_Page1', true, false, 'Blue'); zoneObject.AddWebPart(document.getElementById('WebPart_gwpciCornerArt'), document.getElementById('WebPartTitle_gwpciCornerArt'), false); zoneObject.AddWebPart(document.getElementById('WebPart_gwpciSponsors_711904745b114fcda30b29f874f3130e'), document.getElementById('WebPartTitle_gwpciSponsors_711904745b114fcda30b29f874f3130e'), false); }zoneElement = document.getElementById('ctl01_TemplateBody_ContentPage2_WebPartZone2_Page1');if (zoneElement != null) {zoneObject = __wpm.AddZone(zoneElement, 'ctl01$TemplateBody$ContentPage2$WebPartZone2_Page1', true, false, 'Blue'); zoneObject.AddWebPart(document.getElementById('WebPart_gwpciConfCCO'), document.getElementById('WebPartTitle_gwpciConfCCO'), false); }if(typeof(window['ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_jsmanager'])=='undefined') { window['ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_jsmanager']=new Asi_Web_iParts_ContentCollectionOrganizer_ContentCollectionOrganizerDisplay('ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_radPage', 'False'); }Sys.Application.add_init(function() { $create(Telerik.Web.UI.RadMenu, {"_childListElementCssClass":"rmRootGroup rmToggleHandles rmHorizontal","_skin":"NaturalHeritageSites","attributes":{"Translate":"Yes","PerspectiveId":"80409b89-ae6d-45a9-a9d4-96d522ff2047","NavigationArea":"1","MaxDataBindDepth":"3"},"autoScrollMinimumWidth":100,"clientStateFieldID":"ctl01_ciPrimaryNavigation_NavControl_NavMenu_ClientState","collapseAnimation":"{\"duration\":450}","defaultGroupSettings":"{\"flow\":0,\"expandDirection\":2,\"offsetX\":0}","expandAnimation":"{\"duration\":450}","itemData":[],"showToggleHandle":true}, 
{"itemClicking":PageNavR_OnClientItemClicking,"itemClosed":PageNavR_OnItemClosed,"itemOpened":PageNavR_OnItemOpened,"load":PageNavR_OnClientLoadHandler}, null, $get("ctl01_ciPrimaryNavigation_NavControl_NavMenu")); }); Sys.Application.add_init(function() { $create(Telerik.Web.UI.RadTabStrip, {"_autoPostBack":true,"_postBackOnClick":true,"_postBackReference":"__doPostBack(\u0027ctl01$TemplateBody$WebPartManager1$gwpciConfCCO$ciConfCCO$radTab_Top\u0027,\u0027arguments\u0027)","_scrollButtonsPosition":1,"_selectedIndex":1,"_skin":"MetroTouch","causesValidation":false,"clientStateFieldID":"ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_radTab_Top_ClientState","enableAriaSupport":true,"multiPageID":"ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_radPage","selectedIndexes":["1"],"tabData":[{"value":"1","_implPageViewID":"ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_Page_1","attributes":{"translate":"yes"}},{"value":"2","_implPageViewID":"ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_Page_2","attributes":{"translate":"yes"}},{"value":"3","_implPageViewID":"ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_Page_3","attributes":{"translate":"yes"}},{"value":"4","_implPageViewID":"ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_Page_4","attributes":{"translate":"yes"}}]}, null, null, $get("ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_radTab_Top")); }); Sys.Application.add_init(function() { $create(Telerik.Web.UI.RadMultiPage, {"clientStateFieldID":"ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_radPage_ClientState","pageViewData":[{"id":"ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_Page_1"},{"id":"ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_Page_2"},{"id":"ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_Page_3"},{"id":"ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_Page_4"}],"selectedIndex":1}, null, null, $get("ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_radPage")); }); Sys.Application.add_init(function() { $create(Telerik.Web.UI.RadAjaxManager, {"_updatePanels":"","ajaxSettings":[],"clientEvents":{OnRequestStart:"",OnResponseEnd:""},"defaultLoadingPanelID":"AjaxStatusLoadingPanel","enableAJAX":true,"enableHistory":false,"links":[],"styles":[],"uniqueID":"ctl01$RadAjaxManager1","updatePanelsRenderMode":0}, null, null, $get("ctl01_RadAjaxManager1")); }); Sys.Application.add_init(function() { $create(Telerik.Web.UI.RadWindow, {"_dockMode":false,"behaviors":117,"clientStateFieldID":"ctl01_GenericWindow_ClientState","enableAriaSupport":true,"formID":"aspnetForm","height":"550px","iconUrl":"","localization":"{\"Close\":\"Close\",\"Maximize\":\"Maximize\",\"Minimize\":\"Minimize\",\"Reload\":\"Reload\",\"PinOn\":\"PinOn\",\"PinOff\":\"PinOff\",\"Restore\":\"Restore\",\"OK\":\"OK\",\"Cancel\":\"Cancel\",\"Yes\":\"Yes\",\"No\":\"No\"}","minimizeIconUrl":"","modal":true,"name":"GenericWindow","shortcuts":"[[\u0027close\u0027,\u0027Esc\u0027]]","showContentDuringLoad":false,"skin":"MetroTouch","visibleStatusbar":false,"width":"800px"}, null, null, $get("ctl01_GenericWindow")); }); Sys.Application.add_init(function() { $create(Telerik.Web.UI.RadWindow, 
{"_dockMode":false,"behaviors":117,"clientStateFieldID":"ctl01_ObjectBrowser_ClientState","enableAriaSupport":true,"formID":"aspnetForm","height":"550px","iconUrl":"","localization":"{\"Close\":\"Close\",\"Maximize\":\"Maximize\",\"Minimize\":\"Minimize\",\"Reload\":\"Reload\",\"PinOn\":\"PinOn\",\"PinOff\":\"PinOff\",\"Restore\":\"Restore\",\"OK\":\"OK\",\"Cancel\":\"Cancel\",\"Yes\":\"Yes\",\"No\":\"No\"}","minimizeIconUrl":"","modal":true,"name":"ObjectBrowser","shortcuts":"[[\u0027close\u0027,\u0027Esc\u0027]]","showContentDuringLoad":false,"skin":"MetroTouch","visibleStatusbar":false,"width":"760px"}, null, null, $get("ctl01_ObjectBrowser")); }); Sys.Application.add_init(function() { $create(Telerik.Web.UI.RadWindow, {"_dockMode":false,"behaviors":117,"clientStateFieldID":"ctl01_ObjectBrowserDialog_ClientState","enableAriaSupport":true,"formID":"aspnetForm","height":"400px","iconUrl":"","localization":"{\"Close\":\"Close\",\"Maximize\":\"Maximize\",\"Minimize\":\"Minimize\",\"Reload\":\"Reload\",\"PinOn\":\"PinOn\",\"PinOff\":\"PinOff\",\"Restore\":\"Restore\",\"OK\":\"OK\",\"Cancel\":\"Cancel\",\"Yes\":\"Yes\",\"No\":\"No\"}","minimizeIconUrl":"","modal":true,"name":"ObjectBrowserDialog","shortcuts":"[[\u0027close\u0027,\u0027Esc\u0027]]","showContentDuringLoad":false,"skin":"MetroTouch","visibleStatusbar":false,"width":"600px"}, null, null, $get("ctl01_ObjectBrowserDialog")); }); Sys.Application.add_init(function() { $create(Telerik.Web.UI.RadWindowManager, {"behaviors":117,"clientStateFieldID":"ctl01_WindowManager1_ClientState","enableAriaSupport":true,"formID":"aspnetForm","iconUrl":"","localization":"{\"Close\":\"Close\",\"Maximize\":\"Maximize\",\"Minimize\":\"Minimize\",\"Reload\":\"Reload\",\"PinOn\":\"PinOn\",\"PinOff\":\"PinOff\",\"Restore\":\"Restore\",\"OK\":\"OK\",\"Cancel\":\"Cancel\",\"Yes\":\"Yes\",\"No\":\"No\"}","minimizeIconUrl":"","name":"WindowManager1","shortcuts":"[[\u0027close\u0027,\u0027Esc\u0027]]","skin":"MetroTouch","windowControls":"['ctl01_GenericWindow','ctl01_ObjectBrowser','ctl01_ObjectBrowserDialog']"}, null, {"child":"ctl01_GenericWindow"}, $get("ctl01_WindowManager1")); }); //]]> </script> </form> <div id="fb-root"></div> </body> </html>