
Image Quality and System Performance XX (IQSP)

This conference brings together engineers and scientists focused on what makes a high-quality image, and how to specify the requirements and assess the performance of modern imaging systems.

Keywords: Image Quality, System Performance, Human Visual Perception, Image Processing, Subjective and Objective Quality Assessment
border-radius: 3px; color: white; padding: 6px 12px; text-align: center; text-decoration: none; display: inline-block; font-size: 14px; margin: 4px 2px; transition-duration: 0.4s; cursor: pointer; } </style></div><div class='ContentHtml'><style> /*this is the all-purpose callout. It is behind the keynotes It is grey */ .callout{ background-color:#FFFBED; padding-top: 10px; padding-right: 10px; padding-bottom: 10px; padding-left: 10px; } /*this is the callout for panels and special events. It is yellow */ .coloredcallout{ background-color: #F2F2F2; padding-top: 10px; padding-right: 10px; padding-bottom: 10px; padding-left: 10px; } /*this is the plenary callout. It is pink */ .pinkcallout{ background-color: #FFEEED; padding-top: 10px; padding-right: 10px; padding-bottom: 10px; padding-left: 10px; } #content #story .callout .session_title { line-height: normal; margin-bottom: 1px; } .group { font-size: 14pt; font-weight: bold; text-align: center; } .cat { font-size: 16pt; font-weight: bold; color: red; text-align: center; } .session_time { font-size: 10pt; font-weight: bold; display: inline-block; margin-bottom: 2ex; } .event_time { font-size: 10pt; font-weight: normal; text-align: center; color: #c00000; padding-bottom: 10px; } .date { color: #c00000; background-color: #ffffff; font-size: 18px; font-weight: 500; text-transform: uppercase; margin-top: 11px; margin-bottom: 10px; margin-left: 0; margin-right: 0; padding-top: 7px; padding-bottom: 7px; padding-left: 0px; padding-right: 0; line-height: 1em; } .session_title { font-size: 11pt; color: #3b3b3b; font-weight: bold; margin-top: 0ex; margin-bottom: 0ex; } .session_title:before{ content: ' '; display: block; border: 0; border-top: 1px solid #c00000; margin-top: 1ex; margin-bottom: 1ex; } .session_title:after { content: ' '; display: block; border: 0; border-bottom: 1px solid #c00000; margin-top: 1ex; margin-bottom: 1ex; } .chair_label { font-size: 10pt; font-weight: bold; } .chair { font-size: 10pt; } p span.author_string { color: #0C0C0C; font-family: 'Montserrat', sans-serif; font-size: 13px; font-weight: 400; font-style: italic; } p span.placeholder_desc { font-size: 12pt; font-weight: normal; } .room { font-size: 11pt; color: #c00000; } .presentation_title { color: #0C0C0C; font-family: 'Montserrat', sans-serif; font-size: 13px; font-weight: 500; } .presentation_time { color: #0C0C0C; font-family: 'Montserrat', sans-serif; font-size: 13px; font-weight: 500; } .session_notes { color: #3b3b3b; font-style: italic; font-weight: 300; font-size: 9pt; display: block; margin-bottom: 1ex; } .abstract{ color: #3b3b3b; font-style: italic; font-weight: 400; display: block; margin-bottom: 1ex; } .bio { color: #3b3b3b; font-style: italic; font-weight: 400; display: block; margin-bottom: 1ex; } .keynote-bio{ font-size: 11px; color: #3b3b3b; font-style: italic; font-weight: 400; display: block; margin-bottom: 1ex; } </style></div><div class='ContentHtml'><div class="leftbox"><a href="http://www.imaging.org/IST/IST/Conferences/EI/EI2023/EI2023.aspx"><img alt="" src="/images/IST_Images/Conferences/EI/EI2023/EI2022_250x125_January2023.png" style="margin-top: 0px; width: 100%;" /></a> </div></div><div class='ContentHtml'><style type="text/css"> /* CSS Dropdown menuv */ #menuv-container { max-width: 100%; display: block; margin-left: 0px; margin-right: auto; width: 250px } #menuv { font-size: 13px; font-family: 'Jost', sans-serif; text-transform: uppercase; max-width:100%; width:100%; float:left; margin-top: 0em; margin-right: 0em; margin-bottom: 0em; 
margin-left: 0em; border: auto solid #ffffff; background-color: #ffffff; /* white*/ } #menuv_NAV{ padding: 0; margin: 0; border: 0; } #menuv_NAV ul, #menuv_NAV li { list-style: none; margin: 0; padding: 0; } #menuv_NAV ul { position: relative; z-index: 597; float: left; } #menuv_NAV ul li { float: left; min-height: 1px; line-height: 1.5em; vertical-align: middle; } #menuv_NAV ul li.hover, #menuv_NAV ul li:hover { position: relative; z-index: 599; cursor: default; } #menuv_NAV ul ul { visibility: hidden; position: absolute; top: 100%; left: 0; z-index: 598; width: 100%; } #menuv_NAV ul ul li { float: none; } #menuv_NAV ul ul, #menuv_NAV ul ul ul { top: -2px; left: 99%; } #menuv_NAV ul li:hover > ul { visibility: visible; } #menuv_NAV ul li { float: none; } #menuv_NAV a { display: block; font-weight: 400 !important; } /* Custom CSS Styles */ #menuv_NAV { font-family: 'Jost', sans-serif; text-transform: uppercase; font-size: 13px; } #menuv_NAV:after, #menuv_NAV ul:after { content: ''; display: block; clear: both; } #menuv_NAV ul { background: #EEEEEE; border: 0px solid #aaaaaa; padding: 4px; width: 100%; } #menuv_NAV ul li { color: #0C0C0C; position: relative; } #menuv_NAV ul li.hover, #menuv_NAV ul li:hover { background: #cccccc; background: -moz-linear-gradient(#cccccc 0%, #cccccc100%); background: -webkit-gradient(linear, left top, left bottom, color-stop(0%, #9f9f9f), color-stop(100%, #cccccc )); background: -webkit-linear-gradient(#cccccc 0%, #cccccc 100%); background: linear-gradient(#cccccc 0%, #cccccc 100%); color: #FFF; } #menuv_NAV ul li.hover > a, #menuv_NAV ul li:hover > a { color: #000; border: 0px solid #cccccc; } #menuv_NAV ul ul { width: 650px; } #menuv_NAV a { border: 0px solid transparent; padding: 3px 10px; } #menuv_NAV a:link, #menuv_NAV a:visited { color: #0C0C0C; text-decoration: none; } #menuv_NAV a:hover { background: #cccccc; background: -moz-linear-gradient(#cccccc 0%, #cccccc 100%); background: -webkit-gradient(linear, left top, left bottom, color-stop(0%, #cccccc ), color-stop(100%, #cccccc )); background: -webkit-linear-gradient(#cccccc 0%, #cccccc 100%); background: linear-gradient(#cccccc 0%, #cccccc 100%); color: #FFF; } #menuv_NAV a:active { color: #ffa500; } #menuv_NAV .has-sub:hover > a:after, #menuv_NAV .has-sub.hover > a:after { border-color: transparent transparent transparent #FFF; } #menuv_NAV .has-sub > a:after { content: ''; width: 0px; height: 0px; border-style: solid; border-width: 0px 0px 0px 0px; border-color: transparent transparent transparent #808080; position: absolute; top: 50%; right: 5%; margin-top: -4px; -webkit-transform: rotate(360deg); } </style> <div id="menuv-container"> <div id="menuv_NAV"> <ul> <li>&nbsp;</li> <li><a href="https://www.imaging.org/IST/Conferences/EI/EI2023/Attend___Register/IST/Conferences/EI/EI2023/Attend.aspx" target="_blank"><span style="color: #d2232a;">REGISTER</span></a></li> </ul> <ul> <li><a href="https://www.imaging.org/IST/Conferences/EI/EI2023/IST/Conferences/EI/EI2023/EI2023.aspx?EntryCCO=1#EntryCCO'">EI Home/About</a> <ul> <li><a href="https://www.imaging.org/IST/Conferences/EI/EI2023/IST/Conferences/EI/EI2023/EI2023.aspx?EntryCCO=1#EntryCCO">Home</a></li> <li><a href="https://www.imaging.org/IST/Conferences/EI/EI2023/IST/Conferences/EI/EI2023/EI2023.aspx?EntryCCO=2#EntryCCO'">At-a-Glance</a></li> <li><a href="https://www.imaging.org/IST/Conferences/EI/EI2023/IST/Conferences/EI/EI2023/EI2023.aspx?EntryCCO=3#EntryCCO">Awards</a> </li> <li><a 
href="https://www.imaging.org/IST/Conferences/EI/EI2023/IST/Conferences/EI/EI2023/EI2023.aspx?EntryCCO=4#EntryCCO">EI History</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/TAB_Code_of_Conduct.aspx" target="_blank">Code of Conduct</a></li> <li><a href="http://www.imaging.org/IST/IST/About/Press_Releases.aspx" target="_blank">Press Releases</a> </li> </ul> </li> </ul> <ul> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Program.aspx">Symposium Program</a> <ul> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Program.aspx">EI Program</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Program.aspx?ProgramCCO=2#ProgramCCO">Symposium Plenary Speakers</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Program.aspx?ProgramCCO=3#ProgramCCO">EI Conferences</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Program.aspx?ProgramCCO=4#ProgramCCO">Conference Keynotes</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Program.aspx?ProgramCCO=5#ProgramCCO">Short Courses</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Program.aspx?ProgramCCO=6#ProgramCCO">Demonstration &amp; Poster Sessions</a></li> <li><a href="https://www.imaging.org/IST/Conferences/EI/EI2023/IST/Conferences/EI/EI2023/EI2023.aspx?EntryCCO=2#EntryCCO'">Program At-a-Glance</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Program.aspx?ProgramCCO=7#ProgramCCO">Author Index</a></li> </ul> </li> </ul> <ul> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Program.aspx?ProgramCCO=5#ProgramCCO">Short Courses</a></li> </ul> <ul> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Program.aspx?ProgramCCO=3#ProgramCCO" class="top_parent">Conferences</a> <ul> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Program.aspx?ProgramCCO=3#ProgramCCO">EI Conferences</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Conference/C_3DMP.aspx">3D Imaging and Applications 2023 (3DIA)</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Conference/C_AVM.aspx">Autonomous Vehicles and Machines 2023 (AVM)</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Conference/C_COLOR.aspx">Color Imaging XXVIII: Displaying, Processing, Hardcopy, and Applications (COLOR)</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Conference/C_COIMG.aspx">Computational Imaging XXI (COIMG)</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Conference/C_CVAA.aspx">Computer Vision and Image Analysis of Art 2023 (CVAA)</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Conference/C_ERVR.aspx">Engineering Reality of Virtual Reality 2023 (ERVR)</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Conference/C_HPCI.aspx">High Performance Computing for Imaging 2023 (HPCI)</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Conference/C_HVEI.aspx">Human Vision and Electronic Imaging 2023 (HVEI)</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Conference/C_IPAS.aspx">Image Processing: Algorithms and Systems XXI (IPAS)</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Conference/C_IQSP.aspx">Image Quality and System Performance XX (IQSP)</a></li> <li><a 
href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Conference/C_IMAGE.aspx">Imaging and Multimedia Analytics at the Edge 2023 (IMAGE)</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Conference/C_ISS.aspx">Imaging Sensors and Systems 2023 (ISS)</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Conference/C_IRIACV.aspx">Intelligent Robotics and Industrial Applications using Computer Vision 2023 (IRIACV)</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Conference/C_MLSI.aspx">Machine Learning for Scientific Imaging 2023 (MLSI)</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Conference/C_MWSF.aspx">Media Watermarking, Security, and Forensics 2023 (MWSF)</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Conference/C_MOBMU.aspx">Mobile Devices and Multimedia: Enabling Technologies, Algorithms, and Applications 2023 (MOBMU)</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Conference/C_SDA.aspx">Stereoscopic Displays and Applications XXXIV (SD&amp;A)</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Conference/C_VDA.aspx">Visualization and Data Analysis 2023 (VDA)</a></li> </ul> </li> </ul> <ul> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Program.aspx?ProgramCCO=2#ProgramCCO">Symposium Plenary Speakers</a></li> </ul> <ul> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/AuthorSubmit.aspx" class="top_parent">Author/Submit</a> <ul> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/AuthorSubmit.aspx?Author_Info=1">Submit How-to</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/AuthorSubmit.aspx?Author_Info=2">Accepted: Next Steps</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Program.aspx?ProgramCCO=6#ProgramCCO">Demonstration Session</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/AuthorSubmit.aspx?Author_Info=3">Publication FAQ</a></li> <li><a href="https://www.imaging.org/PDFS/Conferences/ElectronicImaging/EI_InvitationLetterRequest_Form_Fillable.pdf">Visas and Letters of Invitation</a></li> </ul> </li> </ul> <ul> <li><a href="https://www.imaging.org/IST/Conferences/EI/EI2023/Attend___Register/IST/Conferences/EI/EI2023/Attend.aspx" class="top_parent">Attend/Register</a> <ul> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Attend.aspx">Registration &amp; Fees</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Attend.aspx?Attendee_Information=2#Attendee_Information">Logistics</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Attend.aspx?Attendee_Information=3#Attendee_Information">Why Attend</a></li> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/Attend.aspx?Attendee_Information=3#JustificationLetter.aspx">Justify Attendance</a> </li> <li><a href="https://www.imaging.org/PDFS/Conferences/ElectronicImaging/EI_InvitationLetterRequest_Form_Fillable.pdf">Visas and Letters of Invitation</a></li> </ul> </li> </ul> <ul> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/ExhibitSponsor.aspx" class="top_parent">Exhibit/Sponsor</a> <ul> <li><a href="https://www.imaging.org/IST/IST/Conferences/EI/EI2023/ExhibitSponsor.aspx">Exhibition &amp; Sponsorship Opportunities</a> </li> </ul> </li> </ul> <ul> <li><a 
href="https://www.imaging.org/IST/Conferences/EI/EI2023/For_Students/IST/Conferences/EI/EI2023/For_Students.aspx" class="top_parent">For Students</a> <ul> <li><a href="https://www.imaging.org/IST/Conferences/EI/EI2023/For_Students/IST/Conferences/EI/EI2023/For_Students.aspx" class="top_parent">Student Focus</a></li> <li><a href="https://www.imaging.org/IST/Conferences/EI/EI2023/For_Students/IST/Conferences/EI/EI2023/For_Students.aspx?Student_Focus_Tabs=2#Student_Focus_Tabs">Student Showcase</a></li> </ul> </li> </ul> </div> <!-- end the menuv-container div --> </div> <!-- end the menuv div --></div><div class='ContentHtml'><div class="leftbox"> <table style="text-align: center; margin-left: auto; margin-right: auto;" width="100%" border="0"> <tbody> <tr> <td colspan="2" 7px;"valign="middle" align="center">&nbsp;</td> </tr> <tr> <td style="height: 19px;" valign="top" align="right"> <!--- begin LinkedIn Share ---> <script src="https://platform.linkedin.com/in.js" type="text/javascript">lang: en_US</script> <script type="IN/Share" data-url="https://www.linkedin.com"></script> <!--- End LinkedIn Share --->&nbsp; </td> <td style="height: 19px;" valign="top" align="left">&nbsp; <!--- begin Twitter Share ---> <a href="https://twitter.com/share" class="twitter-share-button" data-count="none" data-hashtags="EI2023">Tweet</a> <script>!function(d,s,id){var js,fjs=d.getElementsByTagName(s)[0],p=/^http:/.test(d.location)?'http':'https';if(!d.getElementById(id)){js=d.createElement(s);js.id=id;js.src=p+'://platform.twitter.com/widgets.js';fjs.parentNode.insertBefore(js,fjs);}}(document, 'script', 'twitter-wjs');</script> <!--- end Twitter Share ---> </td> </tr> <tr> <td colspan="2" valign="middle" align="center"> <!--- begin Twitter Follow ---> <a href="https://twitter.com/ElectroImaging" class="twitter-follow-button" data-show-count="false">Follow @ElectroImaging</a> <script>!function(d,s,id){var js,fjs=d.getElementsByTagName(s)[0],p=/^http:/.test(d.location)?'http':'https';if(!d.getElementById(id)){js=d.createElement(s);js.id=id;js.src=p+'://platform.twitter.com/widgets.js';fjs.parentNode.insertBefore(js,fjs);}}(document, 'script', 'twitter-wjs');</script> <!--- end Twitter Follow ---><br> </td> </tr> </tbody> </table> </div> <br></div><div class='ContentHtml'><style> .greybox { background-color: #f2f2f2; text-align: center; } table.ImportantDates { color: #000000; margin-left: auto; margin-right: auto; border: 4px solid #f2f2f2; padding-top: 3px; padding-bottom: 3px; padding-left: 1px; padding-right: 1px; font-family: Jost, sans-serif; font-size: 10px; font-weight: 300; line-height: 1.1; vertical-align: top; } .ImpDateDescription{ padding-top: 3px; padding-bottom: 3px; text-align: left; font-weight: 400; vertical-align: top; } .ImpDateSubDescription{ padding-top: 3px; padding-bottom: 3px; text-indent: -6px; padding-left: 6px; text-align: left; font-weight: 400; color: #7f7f7f; vertical-align: top; } .impdatedate { padding-top: 3px; padding-bottom: 3px; text-align: center; font-weight: 400; color: #7f7f7f; font-family: Jost, sans-serif; vertical-align: top; text-align: center;" } .impdatedatesub { padding-top: 3px; padding-bottom: 3px; text-align: center; font-weight: 400; color: #7f7f7f; font-family: Jost, sans-serif; vertical-align: top; text-align: center;" } </style> <div class="leftbox"> <div class="greybox"> <a name="Deadlines" id="Deadlines"></a> <table class="ImportantDates" align="center"> <thead> <tr> <td colspan="2" style="text-align: center; white-space: nowrap;"><span style="font-weight: 
IMPORTANT DATES
(Dates currently being confirmed; check back.)

2022
Call for Papers announced: 2 May
Journal-first (JIST/JPI) submissions
  - Submission site opens: 2 May
  - Journal-first (JIST/JPI) submissions due: 1 Aug
  - Final journal-first manuscripts due: 28 Oct
Conference paper submissions
  - Abstract submission opens: 1 June
  - Priority decision submission ends: 15 July
  - Extended submission ends: 19 Sept
  - FastTrack conference proceedings manuscripts due: 25 Dec
  - All outstanding proceedings manuscripts due: 6 Feb 2023
Registration opens: 1 Dec
Demonstration applications due: 19 Dec
Early registration ends: 18 Dec

2023
Hotel reservation deadline: 6 Jan
Symposium begins: 15 Jan
id="ctl01_TemplateBody_WebPartManager1_gwpciSponsors_8f085c685acb4e879ddb7f29f28731f5_ciSponsors_8f085c685acb4e879ddb7f29f28731f5_Panel_Sponsors"> <h3 style="text-align: center;">Sponsor</h3> <br /> <p style="text-align: center;"><a href="https://www.intuitive.com/" target="_blank"><img alt="" src="/images/IST_Images/company_logos/Huge/IntuitiveSurgical_2023_800x267.png" style="width: 200px; height: 68px;" /></a></p> <h3 style="text-align: center;">Best Paper Sponsor</h3> <p style="text-align: center;"> <br /> <a href="https://rivian.com/" target="_blank"><img alt="" src="/images/IST_Images/company_logos/Huge/Rivian%204c%20logo_Gold_Fusion3915x782.png" style="width: 200px; height: 40px;" /></a><br /> <br /> </p> </div></div> </div> </div> <div id="ctl01_TemplateBody_ContentPage1_downloadContainer" style="display:none;"> <input type="hidden" name="ctl01$TemplateBody$ContentPage1$HiddenDownloadPathField" id="ctl01_TemplateBody_ContentPage1_HiddenDownloadPathField" /><input type="submit" name="ctl01$TemplateBody$ContentPage1$downloadButton" value="Download Path" id="ctl01_TemplateBody_ContentPage1_downloadButton" style="display:none" /> </div></div> </div> <div class="col-sm-9"> <div class="ContentItemContainer"> <div id="WebPartZone2_Page1" class="WebPartZone "> <div class="iMIS-WebPart"> <div id="ste_container_ciConfCCO" class="ContentItemContainer"><div class="panel "> <div id="ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO__Head" class="panel-heading"> </div><div id="ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO__BodyContainer" class="panel-body-container"> <div id="ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO__Body" class="panel-body"> <div id="ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_MainContentControl" class="cco tabs-wrapper tabs-horizontal tabs-top"> <div id="ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_radTab_Top" class="RadTabStrip RadTabStrip_MetroTouch RadTabStripTop_MetroTouch RadTabStripTop RadTabStripTop_MetroTouch_Baseline"> <div class="rtsLevel rtsLevel1"> <ul class="rtsUL"><li class="rtsLI rtsFirst"><a class="rtsLink rtsBefore" href="#"><span class="rtsOut"><span class="rtsIn"><span class="rtsTxt">About IQSP 2023</span></span></span></a></li><li class="rtsLI"><a class="rtsLink rtsSelected" href="#"><span class="rtsOut"><span class="rtsIn"><span class="rtsTxt">IQSP Program</span></span></span></a></li><li class="rtsLI"><a class="rtsLink rtsAfter" href="#"><span class="rtsOut"><span class="rtsIn"><span class="rtsTxt">For IQSP Authors</span></span></span></a></li><li class="rtsLI rtsLast"><a class="rtsLink" href="#"><span class="rtsOut"><span class="rtsIn"><span class="rtsTxt">IQSP History/Proceedings</span></span></span></a></li></ul> </div><input id="ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_radTab_Top_ClientState" name="ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_radTab_Top_ClientState" type="hidden" /> </div> <div id="ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_radPage" class="RadMultiPage RadMultiPage_Default"> <div id="ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_Page_1" class="rmpView rmpHidden"> <div class="ContentTabbedDisplay AddPadding"> <p class="AsiWarning">No content found</p> </div> </div><div id="ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_Page_2" class="rmpView"> <div class="ContentWizardDisplay ClearFix"><div> <div class="row"> <div class="col-sm-12"> <div 
id="ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_Zone1PlaceHolder" class="WebPartZone"> <div id="ste_container_ConferenceHeading" class="ContentItemContainer"><style type="text/css"> /*this is the all-purpose callout. It is behind the keynotes It is grey*/ .callout{ background-color:#f2f2f2; padding-top: 10px; padding-right: 10px; padding-bottom: 10px; padding-left: 10px; } /*this is the callout for panels and special events. It is yellow*/ .coloredcallout{ background-color: #fff9e6; padding-top: 10px; padding-right: 10px; padding-bottom: 10px; padding-left: 10px; } /*this is the plenary callout. It used to be pink, now it's light purple*/ .pinkcallout{ background-color: #e6e6ff; padding-top: 10px; padding-right: 10px; padding-bottom: 10px; padding-left: 10px; } #content #story .callout .session_title { line-height: normal; margin-bottom: 1px; } .group { font-size: 14pt; font-weight: bold; text-align: center; } .cat { font-size: 16pt; font-weight: bold; color: red; text-align: center; } .session_time { font-weight: 300; font-size: 13px; font-family: Jost, sans-serif; display: inline-block; margin-bottom: 2ex; } .event_time { font-family: 'Jost', sans-serif; font-size: 14px; font-weight: 400; text-align: center; color: #A2002D; padding-bottom: 10px; } .date { font-family: 'Jost', sans-serif; color: #A2002D; background-color: #ffffff; font-size: 18px; font-weight: 500; text-transform: uppercase; margin-top: 11px; margin-bottom: 10px; margin-left: 0; margin-right: 0; padding-top: 7px; padding-bottom: 7px; padding-left: 0px; padding-right: 0; line-height: 1em; } .session_title { font-family: 'Open Sans', sans-serif; font-size: 14px; color: #3b3b3b; font-weight: 600; margin-top: 0ex; margin-bottom: 0ex; } .session_title:before{ content: ' '; display: block; border: 0; border-bottom: 1px solid #A2002D; background: #A2002D; margin-top: 1ex; margin-bottom: 1ex; } .session_title:after { content: ' '; display: block; border: 0; border-bottom: 1px solid #A2002D; background: #A2002D; margin-top: 1ex; margin-bottom: 1ex; } .chair_label { color: #0C0C0C; font-family: 'Montserrat', sans-serif; font-size: 13px; font-weight: 600; } .chair { color: #0C0C0C; font-family: 'Montserrat', sans-serif; font-size: 13px; font-weight: 400; } p span.author_string { color: #0C0C0C; font-family: 'Montserrat', sans-serif; font-size: 13px; font-weight: 400; font-style: italic; } p span.placeholder_desc { font-size: 12pt; font-weight: normal; } .room { font-family: 'Jost', sans-serif; font-size: 15px; font-weight: 400; text-transform: none; color: #808080; } .redroom { font-family: 'Jost', sans-serif; font-size: 15px; font-weight: 400; text-transform: none; color: #b30000; } .greenroom { font-family: 'Jost', sans-serif; font-size: 15px; font-weight: 400; text-transform: none; color: #00802b; } .blueroom { font-family: 'Jost', sans-serif; font-size: 15px; font-weight: 400; text-transform: none; color: #002db3; } .yellowroom { font-family: 'Jost', sans-serif; font-size: 15px; font-weight: 400; text-transform: none; color: #ffcc00; } .purpleroom { font-family: 'Jost', sans-serif; font-size: 15px; font-weight: 400; text-transform: none; color: #6600CC; } .presentation_title { color: #0C0C0C; font-family: 'Montserrat', sans-serif; font-size: 13px; font-weight: 700; } .presentation_time { color: #0C0C0C; font-family: 'Montserrat', sans-serif; font-size: 13px; font-weight: 500; } .session_notes { color: #0C0C0C; font-weight: 300; font-style: italic; font-size: 14px; font-family: Jost, sans-serif; display: block; 
Image Quality and System Performance XX

Monday 16 January 2023

20th Anniversary: A Tour of Quality Assessment and System Performance (M1)
Session Chair: Mohamed Chaker Larabi, Université de Poitiers (France)
8:45 – 10:20 AM
Cyril Magnin III

8:45
Conference Welcome, Chaker Larabi

8:50
Twenty years in twenty minutes, Peter Burns

9:10  IQSP-450
Subjective image quality: Beauty and the Beast in human vision (Invited), Göte S. Nyman, University of Helsinki (Finland)
Abstract: For the "20th Anniversary: A Tour of Quality Assessment and System Performance" session. The classic problem of objective vs. subjective image quality is getting a new boost when AI meets high-quality human vision and visual experience. With ever-better displays, cameras, image processing tools, generators, and algorithms, the criteria for good and excellent image quality are pushed further from traditional quality metrics. We face the question of how to measure and model high subjective image quality and visual experience. So far, there is no standard approach to this in the field of imaging. Subjective image quality metrics are expected to provide relevant data for R&D and computational purposes, and especially for evolving, automated, ML-based assessment. Ranking, rating, and preference data are not enough. The talk describes the historical, methodological, and somewhat philosophical background of qualitative methods in image quality assessment and profiling, and considers their future possibilities. It is based on our twenty-year experience in applying qualitative methods in different imaging contexts, from print and publishing to mobile phone camera development.
9:45  IQSP-451
Displays and lighting: What do they have in common? (Invited), Ingrid Heynderickx, Eindhoven University of Technology (the Netherlands)
Abstract: For the "20th Anniversary: A Tour of Quality Assessment and System Performance" session. Experiments have shown that users mainly focus on artefacts when assessing the quality of images or light created with still-developing technologies. Once the technologies have matured, quality is considered a broader concept, related more to the overall viewing experience. Not only is this trend similar across the two largely separate fields of display quality and lighting quality; several of the underlying visual principles also have the same origin in the visual system, yet are investigated separately for displays and lighting. Examples discussed include color break-up, brightness perception, and light adaptation. What can the display community learn from the lighting community, and vice versa?

10:20 – 10:40 AM Coffee Break

20th Anniversary: A Tour of Quality Assessment and System Performance (M2)
Session Chairs: Mohamed Chaker Larabi, Université de Poitiers (France) and Jonathan Phillips, Imatest, LLC (United States)
10:40 AM – 12:30 PM
Cyril Magnin III

10:40  IQSP-452
The revolutionary advancement of camera phone image quality (Invited), Jonathan Phillips, Imatest, LLC (United States)
Abstract: For the "20th Anniversary: A Tour of Quality Assessment and System Performance" session. The global impact of camera phones is multi-faceted, influencing technological advances, user interface design, cloud storage, and image sharing methodologies. The sheer volume of camera phone ownership has dwarfed the number of digital still cameras, as the camera phone market segment grew from tens of millions in the early acceptance years in Japan to annual global sales volumes of over 1 billion for nearly 10 years and counting. This has enabled and pushed forward revolutionary image quality advancement of the cameras incorporated in these multifunctional devices, progressing from 0.11 MP image sensors with 2-inch displays in 1999 to current maximums of 200 MP sensors and 8-inch foldable displays. This overview will provide example images and image quality metrics showing the progression over the past twenty years.
Content will also highlight significant technological advancements impacting image quality attributes such as resolution, low-light performance, dynamic range, zoom, and bokeh.

11:10  IQSP-453
23 years of ISO 12233 resolution measurement (Invited), Dietmar Wueller, Image Engineering GmbH & Co. KG (Germany)
Abstract: For the "20th Anniversary: A Tour of Quality Assessment and System Performance" session. Thirty years ago, ISO/TC42 WG18, a newly created ISO working group on digital photography, began developing a standard to measure the spatial resolution of digital cameras. After years of proposals, testing, and analysis, consensus was reached on a test chart with tilted edge features for measuring spatial frequency response (SFR) and hyperbolic wedges for measuring visual and limiting resolution. The group ensured that the test chart and analysis software would be available internationally. First published in 2000, ISO 12233 is now used to measure cameras in a wide range of applications. It was revised in 2014 to define three new charts: a sine-wave-modulated target in polar format, a low-contrast e-SFR target, and the CIPA chart with software that computes a "human equivalent visual resolution" value. Edition 4 of ISO 12233 will soon be published. It expands the e-SFR measurement with a polynomial fit function for cases of high distortion, adds the analysis of sagittal and tangential edge orientations, specifies a way to determine acutance from the measured SFRs, and adds a compensation for non-uniform illumination.
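For readers unfamiliar with the slanted-edge measurement at the core of ISO 12233, the following Python/NumPy sketch shows the basic e-SFR idea: project pixels onto the edge normal at 4x oversampling, differentiate, and Fourier transform. It is a minimal illustration under our own simplifying assumptions, not the standardized algorithm; the standard adds derivative correction, distortion fitting, noise handling, and other refinements omitted here, and all names are ours.

import numpy as np

def slanted_edge_sfr(roi, oversample=4):
    """Minimal slanted-edge SFR estimate for a near-vertical edge in `roi`
    (2-D array of linear intensities). Illustrative only; not ISO 12233."""
    rows, cols = roi.shape
    # 1. Locate the edge in each row from the centroid of the horizontal gradient.
    grad = np.abs(np.diff(roi, axis=1))
    x = np.arange(grad.shape[1])
    centers = (grad * x).sum(axis=1) / grad.sum(axis=1)
    # 2. Fit a straight line to the per-row edge positions (sub-pixel edge model).
    slope, intercept = np.polyfit(np.arange(rows), centers, 1)
    # 3. Bin every pixel by its signed distance from the fitted edge (4x oversampling)
    #    to build the oversampled edge spread function (ESF).
    dist = np.arange(cols)[None, :] - (slope * np.arange(rows)[:, None] + intercept)
    bins = np.round(dist * oversample).astype(int)
    bins -= bins.min()
    counts = np.bincount(bins.ravel())
    counts[counts == 0] = 1                      # avoid division by zero in empty bins
    esf = np.bincount(bins.ravel(), weights=roi.ravel()) / counts
    # 4. Differentiate to the line spread function, window, and Fourier transform.
    lsf = np.gradient(esf) * np.hamming(esf.size)
    sfr = np.abs(np.fft.rfft(lsf))
    sfr /= sfr[0]                                # normalize to 1 at DC
    freqs = np.fft.rfftfreq(esf.size, d=1.0 / oversample)   # cycles/pixel
    return freqs, sfr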
11:20  IQSP-456
Limits of MTF in practice (Invited), Alexander Braun, Düsseldorf University of Applied Sciences (Germany)
Abstract: For the "20th Anniversary: A Tour of Quality Assessment and System Performance" session. The modulation transfer function (MTF) is one of the most established metrics for gauging the 'sharpness' of imaging systems. Solidly based on linear system theory, it was standardized decades ago, and the main standard, ISO 12233, is constantly being evolved and improved, as demonstrated by the many talks and discussions at this EI 2023. In automotive mass production, though, the MTF is difficult to implement in a stable and reproducible way. It is also currently not traceable to fundamental SI units or, more practically speaking, to national metrology institutes such as NIST (Boulder, CO) or PTB (Germany). This matters because the MTF is used end-of-line in mass production to validate the correct operation of the produced camera system. Finally, our research results indicate that in some circumstances the MTF as a metric does not correlate well with the performance of ML/AI-based algorithms, which are a pillar of modern computer vision.

11:30  IQSP-454
Measuring camera information capacity with slanted-edges (Invited), Norman Koren, Imatest LLC (United States)
Abstract: For the "20th Anniversary: A Tour of Quality Assessment and System Performance" session. We describe a new calculation of camera information capacity, C, derived from standard 4:1 contrast ratio slanted edges, which takes advantage of an overlooked capability of the slanted edge: the variance, and hence the noise, of the edge can be calculated in addition to the mean. The average signal and noise power derived from the edge can be entered into the Shannon-Hartley equation to calculate the information capacity of the 4:1 edge signal, C4. Since C4 is highly sensitive to exposure, we have developed a more consistent metric, Cmax, derived from the maximum allowed signal in the file, making it an excellent approximation of the camera's maximum information capacity. Information capacities C4 and Cmax are excellent figures of merit for system performance because they combine the effects of MTF and noise. They have great potential for predicting the performance of machine vision and artificial intelligence systems. They are easy to calculate, requiring no extra effort beyond the standard slanted-edge MTF calculation.
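As a rough illustration of the Shannon-Hartley idea mentioned in this abstract, the sketch below estimates a per-pixel capacity from a slanted-edge region plus a measured SFR (for example, the output of the previous sketch). It is not the paper's C4 or Cmax calculation: the signal and noise estimation, the bandwidth handling, and the one-dimensional integration are simplified assumptions of ours.

import numpy as np

def edge_information_capacity(roi, freqs, sfr):
    """Toy Shannon-Hartley capacity estimate from a 4:1 slanted-edge ROI and
    its measured SFR (cycles/pixel). Illustrative; not Imatest's C4/Cmax."""
    # Approximate the dark and light patch levels on either side of the edge.
    dark, light = np.percentile(roi, 10), np.percentile(roi, 90)
    signal = light - dark
    # Estimate noise from the near-flat regions away from the edge transition.
    dark_px = roi[roi < dark + 0.1 * signal]
    light_px = roi[roi > light - 0.1 * signal]
    noise = 0.5 * (dark_px.std() + light_px.std())
    # Shannon-Hartley, integrated over frequency up to Nyquist (0.5 cy/px):
    # C = integral of log2(1 + S(f)/N) df, with the signal spectrum
    # approximated by the edge signal attenuated by the measured SFR.
    band = freqs <= 0.5
    snr_f = (signal * sfr[band] / noise) ** 2
    return np.trapz(np.log2(1.0 + snr_f), freqs[band])   # bits/pixel (1-D)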
11:40  IQSP-455
From BxU to integrated information capacity, a brief history of MTF based KPIs at DXOMARK (Invited), Laurent Chanas, DxOMark Image Labs (France)
Abstract: For the "20th Anniversary: A Tour of Quality Assessment and System Performance" session. The MTF curve needs an expert to understand its meaning, and DXOMARK has tried different ways to convert the MTF into a scalar that is meaningful to non-experts. We first proposed the BxU metric, then used the classical acutance. Finally, we proposed a way to compute the Perceptual-Mpix, which is linked to the information capacity of a camera. This concept combines the sensor noise level and the MTF to give the maximum information that can be captured in the image delivered by the sensor. We have recently proposed a new computation of the information capacity of a camera, using the full lens and sensor characterization and a radial model of the lens defects.

11:50
Panel Discussion

12:30 – 2:00 PM Lunch

Monday 16 January PLENARY: Neural Operators for Solving PDEs
Session Chair: Robin Jenkin, NVIDIA Corporation (United States)
2:00 PM – 3:00 PM
Cyril Magnin I/II/III

Deep learning surrogate models have shown promise in modeling complex physical phenomena such as fluid flows, molecular dynamics, and material properties. However, standard neural networks assume finite-dimensional inputs and outputs, and hence cannot withstand a change in resolution or discretization between training and testing. We introduce Fourier neural operators that can learn operators, which are mappings between infinite-dimensional spaces. They are independent of the resolution or grid of the training data and allow zero-shot generalization to higher-resolution evaluations. When applied to weather forecasting, neural operators capture fine-scale phenomena and have skill similar to gold-standard numerical weather models for predictions up to a week or longer, while being 4-5 orders of magnitude faster.

Anima Anandkumar, Bren professor, California Institute of Technology, and senior director of AI Research, NVIDIA Corporation (United States)

Anima Anandkumar is a Bren Professor at Caltech and Senior Director of AI Research at NVIDIA. She is passionate about designing principled AI algorithms and applying them to interdisciplinary domains. She has received several honors, including the IEEE Fellowship, the Alfred P. Sloan Fellowship, the NSF CAREER Award, and faculty fellowships from Microsoft, Google, Facebook, and Adobe. She is part of the World Economic Forum's Expert Network. Anandkumar received her BTech from the Indian Institute of Technology Madras and her PhD from Cornell University; she did postdoctoral research at MIT and held an assistant professorship at the University of California, Irvine.
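For readers curious about the mechanism behind the plenary's Fourier neural operators, here is a heavily simplified PyTorch sketch of one spectral (Fourier) layer. It keeps only a single low-frequency block and omits the pointwise linear path, nonlinearity, and lifting/projection layers of the published FNO architecture; all names and sizes are our own illustrative choices.

import torch
import torch.nn as nn

class SpectralConv2d(nn.Module):
    """One simplified Fourier layer: FFT -> learned mixing of the lowest
    `modes` x `modes` frequencies -> inverse FFT. The learned weights live in
    the truncated frequency domain, so the layer is resolution-independent."""
    def __init__(self, channels, modes):
        super().__init__()
        self.modes = modes
        scale = 1.0 / channels
        self.weight = nn.Parameter(
            scale * torch.randn(channels, channels, modes, modes, dtype=torch.cfloat))

    def forward(self, x):                       # x: (batch, channels, H, W)
        xf = torch.fft.rfft2(x)                 # complex spectrum
        out = torch.zeros_like(xf)
        m = self.modes
        # Mix channels for the retained low-frequency modes only.
        out[..., :m, :m] = torch.einsum("bixy,ioxy->boxy", xf[..., :m, :m], self.weight)
        return torch.fft.irfft2(out, s=x.shape[-2:])

# Example: the same layer applied at two different grid resolutions.
layer = SpectralConv2d(channels=8, modes=12)
y64 = layer(torch.randn(1, 8, 64, 64))
y128 = layer(torch.randn(1, 8, 128, 128))       # no retraining or resizing needed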
3:00 – 3:30 PM Coffee Break

Subjective Quality Assessment (M3)
Session Chair: Sophie Triantaphillidou, University of Westminster (United Kingdom)
3:30 – 4:50 PM
Cyril Magnin III

3:30  IQSP-301
Image demosaicing: Subjective analysis and evaluation of image quality metrics, Tawsin Uddin Ahmed, Seyed Ali Amirshahi, and Marius Pedersen, Norwegian University of Science and Technology (Norway)
Abstract: Most cameras use a single-sensor arrangement with a color filter array (CFA). The color interpolation techniques performed during image demosaicing are normally the reason behind visual artifacts in a captured image. While the severity of the artifacts depends on the demosaicing method used, the artifacts themselves are mainly zipper artifacts (block artifacts across edges) and false-color distortions. In this study, to evaluate the performance of demosaicing methods, a subjective pair-comparison experiment with 15 observers was performed on six different methods (Nearest Neighbours, Bilinear interpolation, Laplacian, Adaptive Laplacian, Smooth hue transition, and Gradient-Based image interpolation) and nine different scenes. The subjective scores and scene images were then collected as a dataset and used to evaluate a set of no-reference image quality metrics. Assessment of these metrics in terms of correlation with the subjective scores shows that many of the evaluated no-reference metrics cannot predict perceived image quality.
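The evaluation step described at the end of this abstract, comparing no-reference metrics against subjective scores, is commonly reported with rank and linear correlation coefficients. A minimal sketch follows, assuming SciPy is available and that metric and subjective scores are paired per test condition; the authors' exact protocol may differ.

import numpy as np
from scipy.stats import pearsonr, spearmanr

def correlate_metric_with_subjective(metric_scores, subjective_scores):
    """Compare a no-reference IQA metric against subjective scores collected
    over the same images (e.g., 6 demosaicing methods x 9 scenes)."""
    metric_scores = np.asarray(metric_scores, dtype=float)
    subjective_scores = np.asarray(subjective_scores, dtype=float)
    srocc, _ = spearmanr(metric_scores, subjective_scores)   # monotonic agreement
    plcc, _ = pearsonr(metric_scores, subjective_scores)     # linear agreement
    return {"SROCC": srocc, "PLCC": plcc}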
3:50  IQSP-302
Age-specific perceptual image quality assessment, Yinan Wang (1), Andrei Chubarau (1), Tara Akhavan (2), Hyunjin Yoo (2), and James Clark (1); (1) McGill University and (2) Forvia (Canada)
Abstract: With the development of image-based applications, assessing the quality of images has become increasingly important. Although our perception of image quality changes as we age, most existing image quality assessment (IQA) metrics make simplifying assumptions about the age of observers, limiting their use for age-specific applications. In this work, we propose a personalized IQA metric to assess the perceived image quality of observers from different age groups. First, we apply an age simulation algorithm to compute how an observer of a particular age would perceive a given image: we process the input image according to an age-specific contrast sensitivity function (CSF), which predicts the reduction of contrast visibility associated with the aging eye. We then combine age simulation with existing IQA metrics to calculate an age-specific perceived image quality score. To validate the combined model, we conducted a psychophysical experiment in a controlled laboratory environment with young (18-31 y.o.), middle-aged (32-52 y.o.), and older (53+ y.o.) adults, measuring their image quality preferences for 84 test images. Our analysis shows that the predictions of our age-specific IQA metric correlate well with the subjective results collected in the psychophysical experiment.
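A minimal sketch of the age-simulation idea described above: filter the image by an age-dependent contrast sensitivity function before scoring it with an existing IQA metric. The CSF form, the age_scale knob, and the viewing-geometry value below are placeholder assumptions of ours, not the model used in the paper.

import numpy as np

def csf(f_cpd, age_scale=1.0):
    """Mannos-Sakrison-style contrast sensitivity curve over spatial frequency
    (cycles/degree). age_scale < 1 crudely shifts sensitivity toward lower
    frequencies as a stand-in for an age-specific CSF."""
    f = f_cpd / age_scale
    s = 2.6 * (0.0192 + 0.114 * f) * np.exp(-(0.114 * f) ** 1.1)
    return s / s.max()

def simulate_aged_view(img, pixels_per_degree=40.0, age_scale=0.7):
    """Filter a grayscale image by the age-scaled CSF in the frequency domain,
    then feed the result to any existing IQA metric."""
    h, w = img.shape
    fy = np.fft.fftfreq(h) * pixels_per_degree       # cycles/degree
    fx = np.fft.fftfreq(w) * pixels_per_degree
    f = np.hypot(fy[:, None], fx[None, :])
    filt = csf(f, age_scale)
    filt[0, 0] = 1.0                                  # preserve mean luminance
    return np.real(np.fft.ifft2(np.fft.fft2(img) * filt))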
4:10  IQSP-303
A method for evaluating camera auto-focusing performance using a transparent display device, Seungwan Jeon, Kichul Park, Sung-Su Kim, and Yitae Kim, Samsung Electronics (Republic of Korea)
Abstract: With the development of various autofocusing (AF) technologies, sensor manufacturers need to evaluate AF performance accurately. The basic approach is to measure the time needed to obtain a refocused image, and the sharpness of that image, while repeatedly inducing the refocusing process. Traditionally, this was done manually by repeatedly covering and uncovering an object or the sensor, which can lead to unreliable results due to human error and the light-blocking method. To address this problem, we propose a new device and solution based on a transparent display. Our method provides more reliable results than the existing approach by modulating the opacity, pattern, and repetition cycle of the target shown on the transparent display.

EI 2023 Highlights Session
Session Chair: Robin Jenkin, NVIDIA Corporation (United States)
3:30 – 5:00 PM
Cyril Magnin II

Join us for a session that celebrates the breadth of what EI has to offer, with short papers selected from EI conferences.

NOTE: The EI-wide "EI 2023 Highlights" session is concurrent with the Monday afternoon COIMG, COLOR, IMAGE, and IQSP conference sessions.
</p> <p>&nbsp;</p> <p class="presentation_time" style="text-align:left;"> <span>IQSP-309</span> <br> <span class="presentation_title" final_id="IQSP-309" onclick="toggle_me()" style="cursor: pointer;">Evaluation of image quality metrics designed for DRI tasks with automotive cameras, </span><span class="author_string" final_id="IQSP-309" onclick="toggle_me()" style="cursor: pointer;">Valentine Klein, </span><span class="author_string" final_id="IQSP-309" onclick="toggle_me()" style="cursor: pointer;">Yiqi LI, </span><span class="author_string" final_id="IQSP-309" onclick="toggle_me()" style="cursor: pointer;">Claudio Greco, </span><span class="author_string" final_id="IQSP-309" onclick="toggle_me()" style="cursor: pointer;">Laurent Chanas, and </span><span class="author_string" final_id="IQSP-309" onclick="toggle_me()" style="cursor: pointer;">Frédéric Guichard</span><span class="author_string" final_id="IQSP-309" onclick="toggle_me()" style="cursor: pointer;">, DXOMARK (France)</span><span class="abstract_link" final_id="IQSP-309" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="IQSP-309" id="abstract-IQSP-309" onclick="toggle_me()" style="display:none; cursor:pointer;">Driving assistance is increasingly used in new car models. Most driving assistance systems are based on automotive cameras and computer vision. Computer Vision, regardless of the underlying algorithms and technology, requires the images to have good image quality, defined according to the task. This notion of good image quality is still to be defined in the case of computer vision as it has very different criteria than human vision: humans have a better contrast detection ability than image chains. The aim of this article is to compare three different metrics designed for detection of objects with computer vision: the Contrast Detection Probability (CDP) [1, 2, 3, 4], the Contrast Signal to Noise Ratio (CSNR) [5] and the Frequency of Correct Resolution (FCR) [6]. For this purpose, the computer vision task of reading the characters on a license plate will be used as a benchmark. The objective is to check the correlation between the objective metric and the ability of a neural network to perform this task. Thus, a protocol to test these metrics and compare them to the output of the neural network has been designed and the pros and cons of each of these three metrics have been noted.</p> <p>&nbsp;</p> <p class="presentation_time" style="text-align:left;"><span>SD&amp;A-224</span> <br> <span class="presentation_title" final_id="SD&amp;A-224" onclick="toggle_me()" style="cursor: pointer;">Human performance using stereo 3D in a helmet mounted display and association with individual stereo acuity, </span><span class="author_string" final_id="SD&amp;A-224" onclick="toggle_me()" style="cursor: pointer;">Bonnie Posselt</span><span class="author_string" final_id="SD&amp;A-224" onclick="toggle_me()" style="cursor: pointer;">, RAF Centre of Aviation Medicine (United Kingdom)</span><span class="abstract_link" final_id="SD&amp;A-224" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="SD&amp;A-224" id="abstract-SD&amp;A-224" onclick="toggle_me()" style="display:none; cursor:pointer;">Binocular Helmet Mounted Displays (HMDs) are a critical part of the aircraft system, allowing information to be presented to the aviator with stereoscopic 3D (S3D) depth, potentially enhancing situational awareness and improving performance. 
The utility of S3D in an HMD may be linked to an individual’s ability to perceive changes in binocular disparity (stereo acuity). Though minimum stereo acuity standards exist for most military aviators, current test methods may be unable to characterise this relationship. This presentation will investigate the effect of S3D on performance when used in a warning alert displayed in an HMD. Furthermore, any effect on performance, ocular symptoms, and cognitive workload shall be evaluated in regard to individual stereo acuity measured with a variety of paper-based and digital stereo tests.</p> <p>&nbsp;</p> <p class="presentation_time" style="text-align:left;"> <span>IMAGE-281</span> <br> <span class="presentation_title" final_id="IMAGE-281" onclick="toggle_me()" style="cursor: pointer;">Smartphone-enabled point-of-care blood hemoglobin testing with color accuracy-assisted spectral learning, </span><span class="author_string" final_id="IMAGE-281" onclick="toggle_me()" style="cursor: pointer;">Sang Mok Park<sup>1</sup>, </span><span class="author_string" final_id="IMAGE-281" onclick="toggle_me()" style="cursor: pointer;">Yuhyun Ji<sup>1</sup>, </span><span class="author_string" final_id="IMAGE-281" onclick="toggle_me()" style="cursor: pointer;">Semin Kwon<sup>1</sup>, </span><span class="author_string" final_id="IMAGE-281" onclick="toggle_me()" style="cursor: pointer;">Andrew R. O’Brien<sup>2</sup>, </span><span class="author_string" final_id="IMAGE-281" onclick="toggle_me()" style="cursor: pointer;">Ying Wang<sup>2</sup>, and </span><span class="author_string" final_id="IMAGE-281" onclick="toggle_me()" style="cursor: pointer;">Young L. Kim<sup>1</sup></span><span class="author_string" final_id="IMAGE-281" onclick="toggle_me()" style="cursor: pointer;">; <sup>1</sup>Purdue University and <sup>2</sup>Indiana University School of Medicine (United States)</span><span class="abstract_link" final_id="IMAGE-281" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="IMAGE-281" id="abstract-IMAGE-281" onclick="toggle_me()" style="display:none; cursor:pointer;">We develop an mHealth technology for noninvasively measuring blood Hgb levels in patients with sickle cell anemia, using the photos of peripheral tissue acquired by the built-in camera of a smartphone. As an easily accessible sensing site, the inner eyelid (i.e., palpebral conjunctiva) is used because of the relatively uniform microvasculature and the absence of skin pigments. Color correction (color reproduction) and spectral learning (spectral super-resolution spectroscopy) algorithms are integrated for accurate and precise mHealth blood Hgb testing. First, color correction using a color reference chart with multiple color patches extracts absolute color information of the inner eyelid, compensating for smartphone models, ambient light conditions, and data formats during photo acquisition. Second, spectral learning virtually transforms the smartphone camera into a hyperspectral imaging system, mathematically reconstructing high-resolution spectra from color-corrected eyelid images. Third, color correction and spectral learning algorithms are combined with a spectroscopic model for blood Hgb quantification among sickle cell patients. Importantly, single-shot photo acquisition of the inner eyelid using the color reference chart allows straightforward, real-time, and instantaneous reading of blood Hgb levels. 
Overall, our mHealth blood Hgb tests could potentially be scalable, robust, and sustainable in resource-limited and homecare settings.</p> <p>&nbsp;</p> <p class="presentation_time" style="text-align:left;"> <span>AVM-118</span> <br> <span class="presentation_title" final_id="AVM-118" onclick="toggle_me()" style="cursor: pointer;">Designing scenes to quantify the performance of automotive perception systems, </span><span class="author_string" final_id="AVM-118" onclick="toggle_me()" style="cursor: pointer;">Zhenyi Liu<sup>1</sup>, </span><span class="author_string" final_id="AVM-118" onclick="toggle_me()" style="cursor: pointer;">Devesh Shah<sup>2</sup>, </span><span class="author_string" final_id="AVM-118" onclick="toggle_me()" style="cursor: pointer;">Alireza Rahimpour<sup>2</sup>, </span><span class="author_string" final_id="AVM-118" onclick="toggle_me()" style="cursor: pointer;">Joyce Farrell<sup>1</sup>, and </span><span class="author_string" final_id="AVM-118" onclick="toggle_me()" style="cursor: pointer;">Brian Wandell<sup>1</sup></span><span class="author_string" final_id="AVM-118" onclick="toggle_me()" style="cursor: pointer;">; <sup>1</sup>Stanford University and <sup>2</sup>Ford Motor Company (United States)</span><span class="abstract_link" final_id="AVM-118" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="AVM-118" id="abstract-AVM-118" onclick="toggle_me()" style="display:none; cursor:pointer;">We implemented an end-to-end simulation for perception systems, based on cameras, that are used in automotive applications. The open-source software creates complex driving scenes and simulates cameras that acquire images of these scenes. The camera images are then used by a neural network in the perception system to identify the locations of scene objects, providing the results as input to the decision system. In this paper, we design collections of test scenes that can be used to quantify the perception system’s performance under a range of (a) environmental conditions (object distance, occlusion ratio, lighting levels), and (b) camera parameters (pixel size, lens type, color filter array). We are designing scene collections to analyze performance for detecting vehicles, traffic signs and vulnerable road users in a range of environmental conditions and for a range of camera parameters. With experience, such scene collections may serve a role similar to that of standardized test targets that are used to quantify camera image quality (e.g., acuity, color).</p> <p>&nbsp;</p> <p class="presentation_time" style="text-align:left;"> <span>VDA-403</span> <br> <span class="presentation_title" final_id="VDA-403" onclick="toggle_me()" style="cursor: pointer;">Visualizing and monitoring the process of injection molding, </span><span class="author_string" final_id="VDA-403" onclick="toggle_me()" style="cursor: pointer;">Christian A. 
Steinparz<sup>1</sup>, </span><span class="author_string" final_id="VDA-403" onclick="toggle_me()" style="cursor: pointer;">Thomas Mitterlehner<sup>2</sup>, </span><span class="author_string" final_id="VDA-403" onclick="toggle_me()" style="cursor: pointer;">Bernhard Praher<sup>2</sup>, </span><span class="author_string" final_id="VDA-403" onclick="toggle_me()" style="cursor: pointer;">Klaus Straka<sup>1,</sup><sup>2</sup>, </span><span class="author_string" final_id="VDA-403" onclick="toggle_me()" style="cursor: pointer;">Holger Stitz<sup>1,</sup><sup>3</sup>, and </span><span class="author_string" final_id="VDA-403" onclick="toggle_me()" style="cursor: pointer;">Marc Streit<sup>1,</sup><sup>3</sup></span><span class="author_string" final_id="VDA-403" onclick="toggle_me()" style="cursor: pointer;">; <sup>1</sup>Johannes Kepler University, <sup>2</sup>Moldsonics GmbH, and <sup>3</sup>datavisyn GmbH (Austria)</span><span class="abstract_link" final_id="VDA-403" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="VDA-403" id="abstract-VDA-403" onclick="toggle_me()" style="display:none; cursor:pointer;">In injection molding machines the molds are rarely equipped with sensor systems. The availability of non-invasive ultrasound-based in-mold sensors provides better means for guiding operators of injection molding machines throughout the production process. However, existing visualizations are mostly limited to plots of temperature and pressure over time. In this work, we present the result of a design study created in collaboration with domain experts. The resulting prototypical application uses real-world data taken from live ultrasound sensor measurements for injection molding cavities captured over multiple cycles during the injection process. Our contribution includes a definition of tasks for setting up and monitoring the machines during the process, and the corresponding web-based visual analysis tool addressing these tasks. The interface consists of a multi-view display with various levels of data aggregation that is updated live for newly streamed data of ongoing injection cycles.</p> <p>&nbsp;</p> <p class="presentation_time" style="text-align:left;"> <span>COIMG-155</span> <br> <span class="presentation_title" final_id="COIMG-155" onclick="toggle_me()" style="cursor: pointer;">Commissioning the James Webb Space Telescope, </span><span class="author_string" final_id="COIMG-155" onclick="toggle_me()" style="cursor: pointer;">Joseph M. Howard</span><span class="author_string" final_id="COIMG-155" onclick="toggle_me()" style="cursor: pointer;">, NASA Goddard Space Flight Center (United States)</span><span class="abstract_link" final_id="COIMG-155" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="COIMG-155" id="abstract-COIMG-155" onclick="toggle_me()" style="display:none; cursor:pointer;">Astronomy is arguably in a golden age, where current and future NASA space telescopes are expected to contribute to this rapid growth in understanding of our universe. The most recent addition to our space-based telescopes dedicated to astronomy and astrophysics is the James Webb Space Telescope (JWST), which launched on 25 December 2021. This talk will discuss the first six months in space for JWST, which were spent commissioning the observatory with many deployments, alignments, and system and instrumentation checks. These engineering activities help verify the proper working of the telescope prior to commencing full science operations. 
For the session: Computational Imaging using Fourier Ptychography and Phase Retrieval.</p> <p>&nbsp;</p> <p class="presentation_time" style="text-align:left;"> <span>HVEI-223</span> <br> <span class="presentation_title" final_id="HVEI-223" onclick="toggle_me()" style="cursor: pointer;">Critical flicker frequency (CFF) at high luminance levels, </span><span class="author_string" final_id="HVEI-223" onclick="toggle_me()" style="cursor: pointer;">Alexandre Chapiro<sup>1</sup>, </span><span class="author_string" final_id="HVEI-223" onclick="toggle_me()" style="cursor: pointer;">Nathan Matsuda<sup>1</sup>, </span><span class="author_string" final_id="HVEI-223" onclick="toggle_me()" style="cursor: pointer;">Maliha Ashraf<sup>2</sup>, and </span><span class="author_string" final_id="HVEI-223" onclick="toggle_me()" style="cursor: pointer;">Rafal Mantiuk<sup>3</sup></span><span class="author_string" final_id="HVEI-223" onclick="toggle_me()" style="cursor: pointer;">; <sup>1</sup>Meta (United States), <sup>2</sup>University of Liverpool (United Kingdom), and <sup>3</sup>University of Cambridge (United Kingdom)</span><span class="abstract_link" final_id="HVEI-223" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="HVEI-223" id="abstract-HVEI-223" onclick="toggle_me()" style="display:none; cursor:pointer;">The critical flicker fusion (CFF) is the frequency of changes at which a temporally periodic light will begin to appear completely steady to an observer. This value is affected by several visual factors, such as the luminance of the stimulus or its location on the retina. With new high dynamic range (HDR) displays, operating at higher luminance levels, and virtual reality (VR) displays, presenting at wide fields-of-view, the effective CFF may change significantly from values expected for traditional presentation. In this work we use a prototype HDR VR display capable of luminances up to 20,000 cd/m^2 to gather a novel set of CFF measurements for never before examined levels of luminance, eccentricity, and size. Our data is useful to study the temporal behavior of the visual system at high luminance levels, as well as setting useful thresholds for display engineering.</p> <p>&nbsp;</p> <p class="presentation_time" style="text-align:left;"> <span>HPCI-228</span> <br> <span class="presentation_title" final_id="HPCI-228" onclick="toggle_me()" style="cursor: pointer;">Physics guided machine learning for image-based material decomposition of tissues from simulated breast models with calcifications, </span><span class="author_string" final_id="HPCI-228" onclick="toggle_me()" style="cursor: pointer;">Muralikrishnan Gopalakrishnan Meena<sup>1</sup>, </span><span class="author_string" final_id="HPCI-228" onclick="toggle_me()" style="cursor: pointer;">Amir K. Ziabari<sup>1</sup>, </span><span class="author_string" final_id="HPCI-228" onclick="toggle_me()" style="cursor: pointer;">Singanallur Venkatakrishnan<sup>1</sup>, </span><span class="author_string" final_id="HPCI-228" onclick="toggle_me()" style="cursor: pointer;">Isaac R. Lyngaas<sup>1</sup>, </span><span class="author_string" final_id="HPCI-228" onclick="toggle_me()" style="cursor: pointer;">Matthew R. Norman<sup>1</sup>, </span><span class="author_string" final_id="HPCI-228" onclick="toggle_me()" style="cursor: pointer;">Balint Joo<sup>1</sup>, </span><span class="author_string" final_id="HPCI-228" onclick="toggle_me()" style="cursor: pointer;">Thomas L. 
Beck<sup>1</sup>, </span><span class="author_string" final_id="HPCI-228" onclick="toggle_me()" style="cursor: pointer;">Charles A. Bouman<sup>2</sup>, </span><span class="author_string" final_id="HPCI-228" onclick="toggle_me()" style="cursor: pointer;">Anuj Kapadia<sup>1</sup>, and </span><span class="author_string" final_id="HPCI-228" onclick="toggle_me()" style="cursor: pointer;">Xiao Wang<sup>1</sup></span><span class="author_string" final_id="HPCI-228" onclick="toggle_me()" style="cursor: pointer;">; <sup>1</sup>Oak Ridge National Laboratory and <sup>2</sup>Purdue University (United States)</span><span class="abstract_link" final_id="HPCI-228" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="HPCI-228" id="abstract-HPCI-228" onclick="toggle_me()" style="display:none; cursor:pointer;">Material decomposition of Computed Tomography (CT) scans using projection-based approaches, while highly accurate, poses a challenge for medical imaging researchers and clinicians due to limited or no access to projection data. We introduce a deep learning image-based material decomposition method guided by physics and requiring no access to projection data. The method is demonstrated to decompose tissues from simulated dual-energy X-ray CT scans of virtual human phantoms containing four materials - adipose, fibroglandular, calcification, and air. The method uses a hybrid unsupervised and supervised learning technique to tackle the material decomposition problem. We take advantage of the unique X-ray absorption rate of calcium compared to body tissues to perform a preliminary segmentation of calcification from the images using unsupervised learning. We then perform supervised material decomposition using a deep learned UNET model which is trained using GPUs in the high-performant systems at the Oak Ridge Leadership Computing Facility. The method is demonstrated on simulated breast models to decompose calcification, adipose, fibroglandular, and air.</p> <p>&nbsp;</p> <p class="presentation_time" style="text-align:left;"> <span>3DIA-104</span> <br> <span class="presentation_title" final_id="3DIA-104" onclick="toggle_me()" style="cursor: pointer;">Layered view synthesis for general images, </span><span class="author_string" final_id="3DIA-104" onclick="toggle_me()" style="cursor: pointer;">Loïc Dehan, </span><span class="author_string" final_id="3DIA-104" onclick="toggle_me()" style="cursor: pointer;">Wiebe Van Ranst, and </span><span class="author_string" final_id="3DIA-104" onclick="toggle_me()" style="cursor: pointer;">Patrick Vandewalle</span><span class="author_string" final_id="3DIA-104" onclick="toggle_me()" style="cursor: pointer;">, Katholieke University Leuven (Belgium)</span><span class="abstract_link" final_id="3DIA-104" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="3DIA-104" id="abstract-3DIA-104" onclick="toggle_me()" style="display:none; cursor:pointer;">We describe a novel method for monocular view synthesis. The goal of our work is to create a visually pleasing set of horizontally spaced views based on a single image. This can be applied in view synthesis for virtual reality and glasses-free 3D displays. Previous methods produce realistic results on images that show a clear distinction between a foreground object and the background. We aim to create novel views in more general, crowded scenes in which there is no clear distinction. 
Our main contribution is a computationally efficient method for realistic occlusion inpainting and blending, especially in complex scenes. Our method can be effectively applied to any image, which is shown both qualitatively and quantitatively on a large dataset of stereo images. Our method performs natural disocclusion inpainting and maintains the shape and edge quality of foreground objects.</p> <p>&nbsp;</p> <p class="presentation_time" style="text-align:left;"> <span>ISS-329</span> <br> <span class="presentation_title" final_id="ISS-329" onclick="toggle_me()" style="cursor: pointer;">A self-powered asynchronous image sensor with independent in-pixel harvesting and sensing operations, </span><span class="author_string" final_id="ISS-329" onclick="toggle_me()" style="cursor: pointer;">Ruben Gomez-Merchan, </span><span class="author_string" final_id="ISS-329" onclick="toggle_me()" style="cursor: pointer;">Juan Antonio Leñero-Bardallo, and </span><span class="author_string" final_id="ISS-329" onclick="toggle_me()" style="cursor: pointer;">Ángel Rodríguez-Vázquez</span><span class="author_string" final_id="ISS-329" onclick="toggle_me()" style="cursor: pointer;">, University of Seville (Spain)</span><span class="abstract_link" final_id="ISS-329" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="ISS-329" id="abstract-ISS-329" onclick="toggle_me()" style="display:none; cursor:pointer;">A new self-powered asynchronous sensor with a novel pixel architecture is presented. Pixels are autonomous and can harvest or sense energy independently. During image acquisition, pixels toggle to a harvesting operation mode once they have sensed their local illumination level. With the proposed pixel architecture, the most illuminated pixels provide an early contribution to powering the sensor, while weakly illuminated ones spend more time sensing their local illumination. Thus, the equivalent frame rate is higher than that offered by conventional self-powered sensors that harvest and sense illumination in independent phases. The proposed sensor uses a Time-to-First-Spike readout that allows trading off image quality against data and bandwidth consumption. The sensor has HDR operation with a dynamic range of 80 dB. Pixel power consumption is only 70 pW. In the article, we describe the sensor and pixel architectures in detail. Experimental results are provided and discussed. Sensor specifications are benchmarked against the state of the art.</p> <p>&nbsp;</p> <p class="presentation_time" style="text-align:left;"><span>COLOR-184</span> <br> <span class="presentation_title" final_id="COLOR-184" onclick="toggle_me()" style="cursor: pointer;">Color blindness and modern board games, </span><span class="author_string" final_id="COLOR-184" onclick="toggle_me()" style="cursor: pointer;">Alessandro Rizzi<sup>1</sup> and </span><span class="author_string" final_id="COLOR-184" onclick="toggle_me()" style="cursor: pointer;">Matteo Sassi<sup>2</sup></span><span class="author_string" final_id="COLOR-184" onclick="toggle_me()" style="cursor: pointer;">; <sup>1</sup>Università degli Studi di Milano and <sup>2</sup>consultant (Italy)</span><span class="abstract_link" final_id="COLOR-184" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="COLOR-184" id="abstract-COLOR-184" onclick="toggle_me()" style="display:none; cursor:pointer;">The board game industry is experiencing strong renewed interest. 
In the last few years, about 4000 new board games have been designed and distributed each year. The gender balance among board game players is approaching equality, though the male component is still a slight majority. This means that (at least) around 10% of board game players are color blind. How does the board game industry deal with this? Recently, awareness has begun to rise in board game design, but so far there is a big gap compared with (e.g.) the computer game industry. This paper presents some data about the current situation, discussing exemplary cases of successful board games.</p> <p>&nbsp;</p> <script> /* Toggle the visibility of the abstract paragraph that corresponds to the clicked title, author, or [view abstract] element. */ function toggle_me() { var elm = event.target || event.srcElement; var final_id = elm.getAttribute("final_id"); var the_id = "abstract-" + final_id; var x = document.getElementById(the_id); if (x.style.display === "none"){ x.style.display = "block"; } else { x.style.display = "none"; } }</script> </div> <br> <span> </span> <p class="event_time">5:00 – 6:15 PM EI 2023 All-Conference Welcome Reception (in the Cyril Magnin Foyer)</p> <p class="date">Tuesday 17 January 2023</p> <span> </span> <div class="callout"> <p class="session_title">KEYNOTE: Perceptual Video Quality 1 (T1)<img alt="" class="flag_image" src="http://www.imaging.org/images/IST_Images/Conferences/EI/Joint-Session.png" style="vertical-align: middle; margin-left:1em;"></p> <span class="chair">Session Chairs: Lukáš Krasula, Netflix, Inc. (United States) and Mohamed Chaker Larabi, Université de Poitiers (France)<br> </span><span class="session_time">9:05 – 10:10 AM</span> <br> <span class="room">Cyril Magnin III </span> <br> <span></span> <p class="session_notes">This session is jointly sponsored by: Human Vision and Electronic Imaging 2023, and Image Quality and System Performance XX. </p> <br> <p class="presentation_time" style="text-align:left;"> <br> <span class="presentation_title">Joint Conference Welcome</span> </p> <p class="presentation_time" style="text-align:left;"> <a name="HVEI-258"></a><a name="HVEI-258"></a><span style="float: right;">HVEI-258</span> <br> <span class="presentation_title" final_id="HVEI-258" onclick="toggle_me()" style="cursor: pointer;">KEYNOTE: Bringing joy to Netflix members through perceptual encoding optimization, </span><span class="author_string" final_id="HVEI-258" onclick="toggle_me()" style="cursor: pointer;">Anne Aaron</span><span class="author_string" final_id="HVEI-258" onclick="toggle_me()" style="cursor: pointer;">, Netflix, Inc. (United States)</span><span class="abstract_link" final_id="HVEI-258" onclick="toggle_me()"> [view abstract] </span></p> <p class="session_notes">&nbsp;</p> <p class="session_notes">As Director of Encoding Technologies, Anne Aaron leads the team responsible for media processing and encoding at Netflix. Her team works on video, audio, images and timed-text, from analysis to processing, encoding, packaging and DRM. On the streaming side, they strive to deliver a compelling viewing experience for millions of Netflix members worldwide, no matter where, how and what they watch. For the Netflix studio, they build media technologies that can improve content production. In her previous role at Netflix, Aaron led the Video Algorithms team. As a team, they researched and deployed innovation in the video encoding space (per-title encoding, video quality assessment and perceptual metrics, shot-based encoding, HDR, next-generation codecs) that benefited Netflix members as well as impacted the rest of the industry. 
Recent recognitions include the SMPTE 2019 Workflow Systems Medal, Forbes' 2018 America's Top Women in Tech, and Business Insider's 2017 Most Powerful Female Engineers in US Tech.</p> <p class="abstract" final_id="HVEI-258" id="abstract-HVEI-258" onclick="toggle_me()" style="display:none; cursor:pointer;">Audio and video compression are immensely important to Netflix, as well as to internet service providers (ISPs). It has been estimated that our codec optimization efforts, together with the Open Connect program, saved ISPs over 1 billion dollars in 2021 alone. The keynote will talk about the importance of perceptual models and optimization for delivering hits such as Stranger Things, Squid Game, or Red Notice in the highest quality while being mindful of internet traffic. It will cover recent advances in audio and video encoding, innovations in the subjective and objective assessment of quality, as well as immediate and future challenges in this area.</p> <p>&nbsp;</p> </div> <br> <p class="event_time">10:00 AM – 7:30 PM Industry Exhibition - Tuesday (in the Cyril Magnin Foyer)</p> <p class="event_time">10:20 – 10:50 AM Coffee Break</p> <br> <br> <p class="session_title">Perceptual Video Quality 2 (T2)<img alt="" class="flag_image" src="http://www.imaging.org/images/IST_Images/Conferences/EI/Joint-Session.png" style="vertical-align: middle; margin-left:1em;"></p> <span class="chair_label">Session Chairs: </span> <span class="chair">Lukáš Krasula, Netflix, Inc. (United States) and Mohamed Chaker Larabi, Université de Poitiers (France)<br> </span> <span class="session_time">10:50 AM – 12:30 PM</span> <br> <span class="room">Cyril Magnin III </span> <br> <span></span> <p class="session_notes">This session is jointly sponsored by: Human Vision and Electronic Imaging 2023, and Image Quality and System Performance XX.</p> <br> <p class="presentation_time" style="text-align:left;">10:50<a name="HVEI-259"></a><a name="HVEI-259"></a><span style="float: right;">HVEI-259</span> <br> <span class="presentation_title" final_id="HVEI-259" onclick="toggle_me()" style="cursor: pointer;">Video quality of video professionals for Video Assisted Referee (VAR), </span><span class="author_string" final_id="HVEI-259" onclick="toggle_me()" style="cursor: pointer;">Kjell Brunnström<sup>1,</sup><sup>2</sup>, </span><span class="author_string" final_id="HVEI-259" onclick="toggle_me()" style="cursor: pointer;">Anders Djupsjöbacka<sup>1</sup>, </span><span class="author_string" final_id="HVEI-259" onclick="toggle_me()" style="cursor: pointer;">Johsan Billingham<sup>3</sup>, </span><span class="author_string" final_id="HVEI-259" onclick="toggle_me()" style="cursor: pointer;">Katharina Wistel<sup>3</sup>, </span><span class="author_string" final_id="HVEI-259" onclick="toggle_me()" style="cursor: pointer;">Börje Andrén<sup>1</sup>, </span><span class="author_string" final_id="HVEI-259" onclick="toggle_me()" style="cursor: pointer;">Oskars Ozolins<sup>1,</sup><sup>4</sup>, and </span><span class="author_string" final_id="HVEI-259" onclick="toggle_me()" style="cursor: pointer;">Nicolas Evans<sup>3</sup></span><span class="author_string" final_id="HVEI-259" onclick="toggle_me()" style="cursor: pointer;">; <sup>1</sup>RISE Research Institutes of Sweden AB (Sweden), <sup>2</sup>Mid Sweden University (Sweden), <sup>3</sup>Fédération Internationale de Football Association (FIFA) (Switzerland), and <sup>4</sup>KTH (Royal Institute of Technology) (Sweden)</span><span class="abstract_link" 
final_id="HVEI-259" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="HVEI-259" id="abstract-HVEI-259" onclick="toggle_me()" style="display:none; cursor:pointer;">Changes in the footballing world’s approach to technology and innovation contributed to the decision by the International Football Association Board (IFAB) to introduce Video Assistant Referees (VAR). The change meant that under strict protocols referees could use video replays to review decisions in the event of a “clear and obvious error” or a “serious missed incident”. This led to the need by Fédération Internationale de Football Association (FIFA) to develop methods for quality control of the VAR-systems, which was done in collaboration with RISE Research Institutes of Sweden AB. One of the important aspects is the video quality. The novelty of this study is that it has performed a user study specifically targeting video experts i.e., to measure the perceived quality of video professionals working with video production as their main occupation. An experiment was performed involving 25 video experts. In addition, six video quality models have been benchmarked against the user data and evaluated to show which of the models could provide the best predictions of perceived quality for this application. Video Quality Metric for variable frame delay (VQM_VFD) had the best performance for both formats, followed by Video Multimethod Assessment Fusion (VMAF) and VQM General model.</p> <p>&nbsp;</p> <p class="presentation_time" style="text-align:left;">11:10<a name="HVEI-260"></a><a name="HVEI-260"></a><span style="float: right;">HVEI-260</span> <br> <span class="presentation_title" final_id="HVEI-260" onclick="toggle_me()" style="cursor: pointer;">User perception for dynamic video resolution change using VVC, </span><span class="author_string" final_id="HVEI-260" onclick="toggle_me()" style="cursor: pointer;">Sachin G. Deshpande and </span><span class="author_string" final_id="HVEI-260" onclick="toggle_me()" style="cursor: pointer;">Philip Cowan</span><span class="author_string" final_id="HVEI-260" onclick="toggle_me()" style="cursor: pointer;">, Sharp (United States)</span><span class="abstract_link" final_id="HVEI-260" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="HVEI-260" id="abstract-HVEI-260" onclick="toggle_me()" style="display:none; cursor:pointer;">We define experiments that measure user perception when video resolution changes dynamically. Versatile Video Coding (VVC) standard was recently finalized and it includes a reference picture resampling (RPR) tool. VVC RPR supports changing spatial resolution in a coded video sequence on a per picture basis. VVC RPR defines the downsampling and upsampling filters to be used when changing resolution. This paper provides results from subjective evaluation when VVC RPR is used for part of the video sequence to dynamically change resolution. The experiments use different QP values (or bitrates), different RPR scale factors and different highest original spatial resolutions. The results compare how users perceive video coded using VVC RPR for some pictures compared to an anchor which does not use RPR. In addition to the subjective results, we also describe performance of various metrics including PSNR, VMAF and MS-SSIM. Our results can help choose the highest RPR scale factor that can be used to achieve/ maintain certain perceived quality when using RPR (for example for bitrate reduction). 
The study also confirms that MS-SSIM and VMAF match subjective test results more closely compared to PSNR.</p> <p>&nbsp;</p> <p class="presentation_time" style="text-align:left;">11:30<a name="IQSP-261"></a><a name="IQSP-261"></a><span style="float: right;">IQSP-261</span> <br> <span class="presentation_title" final_id="IQSP-261" onclick="toggle_me()" style="cursor: pointer;">Proposing more ecologically-valid experiment protocol using YouTube platform, </span><span class="author_string" final_id="IQSP-261" onclick="toggle_me()" style="cursor: pointer;">Gabriela Wielgus, </span><span class="author_string" final_id="IQSP-261" onclick="toggle_me()" style="cursor: pointer;">Lucjan Janowski, </span><span class="author_string" final_id="IQSP-261" onclick="toggle_me()" style="cursor: pointer;">Kamil Koniuch, </span><span class="author_string" final_id="IQSP-261" onclick="toggle_me()" style="cursor: pointer;">Mikolaj Leszczuk, and </span><span class="author_string" final_id="IQSP-261" onclick="toggle_me()" style="cursor: pointer;">Rafal Figlus</span><span class="author_string" final_id="IQSP-261" onclick="toggle_me()" style="cursor: pointer;">, AGH University of Science and Technology (Poland)</span><span class="abstract_link" final_id="IQSP-261" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="IQSP-261" id="abstract-IQSP-261" onclick="toggle_me()" style="display:none; cursor:pointer;">Video streaming is becoming increasingly popular, and with platforms like YouTube, users do not watch the video passively but seek, pause, and read the comments. The popularity of video services is possible due to the development of compression and quality prediction algorithms. However, those algorithms are developed based on classic experiments, which are non-ecologically valid. Therefore, classic experiments do not mimic real user interaction. Further development of the quality and compression algorithms depends on the results coming from ecologically-valid experiments. Therefore, we aim to propose such experiments. Nevertheless, proposing a new experimental protocol is difficult, especially when there is no limitation on content selection and control of the video. The freedom makes data analysis more challenging. In this paper, we present an ecologically-valid experimental protocol in which the subject assessed the quality while freely using YouTube. To achieve this goal, we developed a Chrome extension that collects objective data and allows network manipulation. Our deep data analysis shows a correlation between MOS and objectively measured results such as resolution, which proves that the ecologically-valid test works. 
Moreover, we have shown significant differences between subjects, allowing for a more detailed understanding of how quality influences the interaction with the service.</p> <p>&nbsp;</p> <p class="presentation_time" style="text-align:left;">11:50<a name="IQSP-262"></a><a name="IQSP-262"></a><span style="float: right;">IQSP-262</span> <br> <span class="presentation_title" final_id="IQSP-262" onclick="toggle_me()" style="cursor: pointer;">Evaluation of motion blur image quality in video frame interpolation, </span><span class="author_string" final_id="IQSP-262" onclick="toggle_me()" style="cursor: pointer;">Hai Dinh, </span><span class="author_string" final_id="IQSP-262" onclick="toggle_me()" style="cursor: pointer;">Fangwen Tu, </span><span class="author_string" final_id="IQSP-262" onclick="toggle_me()" style="cursor: pointer;">Qinyi Wang, </span><span class="author_string" final_id="IQSP-262" onclick="toggle_me()" style="cursor: pointer;">Brett Frymire, and </span><span class="author_string" final_id="IQSP-262" onclick="toggle_me()" style="cursor: pointer;">Bo Mu</span><span class="author_string" final_id="IQSP-262" onclick="toggle_me()" style="cursor: pointer;">, Omnivision Technology (United States)</span><span class="abstract_link" final_id="IQSP-262" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="IQSP-262" id="abstract-IQSP-262" onclick="toggle_me()" style="display:none; cursor:pointer;">While slow motion has become a standard feature in mainstream cell phones, a fast approach for assessing slow-motion video quality that does not rely on specific training datasets is not available. Conventionally, researchers evaluate their algorithms with the peak signal-to-noise ratio (PSNR) or structural similarity index measure (SSIM) between ground-truth and reconstructed frames. However, both are global evaluation indices and are more sensitive to noise or distortion introduced by the interpolation. For video interpolation, especially for fast-moving objects, motion blur and ghosting artifacts are more essential to the audience's subjective judgment. 
How to achieve a proper evaluation for the Video Frame Interpolation (VFI) task is still a problem that is not well addressed.</p> <p>&nbsp;</p> <p class="presentation_time" style="text-align:left;">12:10<a name="IQSP-263"></a><a name="IQSP-263"></a><span style="float: right;">IQSP-263</span> <br> <span class="presentation_title" final_id="IQSP-263" onclick="toggle_me()" style="cursor: pointer;">Subjective video quality for 4K HDR-WCG content using a browser-based approach for “at-home” testing, </span><span class="author_string" final_id="IQSP-263" onclick="toggle_me()" style="cursor: pointer;">Lukáš Krasula<sup>1</sup>, </span><span class="author_string" final_id="IQSP-263" onclick="toggle_me()" style="cursor: pointer;">Anustup Choudhury<sup>2</sup>, </span><span class="author_string" final_id="IQSP-263" onclick="toggle_me()" style="cursor: pointer;">Scott Daly<sup>2</sup>, </span><span class="author_string" final_id="IQSP-263" onclick="toggle_me()" style="cursor: pointer;">Zhi Li<sup>1</sup>, </span><span class="author_string" final_id="IQSP-263" onclick="toggle_me()" style="cursor: pointer;">Robin Atkins<sup>2</sup>, </span><span class="author_string" final_id="IQSP-263" onclick="toggle_me()" style="cursor: pointer;">Ludovic Malfait<sup>2</sup>, and </span><span class="author_string" final_id="IQSP-263" onclick="toggle_me()" style="cursor: pointer;">Aditya Mavlankar<sup>1</sup></span><span class="author_string" final_id="IQSP-263" onclick="toggle_me()" style="cursor: pointer;">; <sup>1</sup>Netflix, Inc. and <sup>2</sup>Dolby Laboratories, Inc. (United States)</span><span class="abstract_link" final_id="IQSP-263" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="IQSP-263" id="abstract-IQSP-263" onclick="toggle_me()" style="display:none; cursor:pointer;">A subjective quality study of 4K HDR-WCG (3840 x 2160, High Dynamic Range, Wide Color Gamut) video content was performed in an at-home scenario. There are no available datasets on such content, yet they are crucial for objective quality metrics development and testing. While at-home testing generally implies a lack of calibration, we sought to maximize calibration by limiting the displays to a specific TV model that we have calibrated in our lab, for which unit-to-unit deviations were found to be small. Moreover, we performed the experiment in the Dolby Vision mode (where the various enhancements of the TV are turned OFF by default). In addition, we asked subjects to go through procedures to ensure a standard viewing distance of 1.6 picture heights, and to eliminate ambient lighting effects on display contrast by viewing in dark or dim conditions. A browser-based approach was used that took control of the TV and ensured the content was viewed at the native resolution of the TV (i.e., dot-on-dot mode). Particular care was given to content selection to probe specific challenge cases of the display behavior as well as human vision (e.g., complex motion effects on eye tracking). Further, several clips were selected that represent the highest quality possible with 2021 technology. We found that the subject response variability was similar to that of lab-based experiments, suggesting that the noise in the results due to display variability and lack of unit-to-unit calibration was less than the within-subject variability due to personal physiology or preferences. 
Several statistical models and subject-rejection strategies will be compared and the usefulness of the data for objective metrics will be presented.</p> <p>&nbsp;</p> <br> <br> <p class="event_time">12:30 – 2:00 PM Lunch</p> <div class="pinkcallout"> <p class="session_title">Tuesday 17 January PLENARY: Embedded Gain Maps for Adaptive Display of High Dynamic Range Images</p> <span class="chair">Session Chair: Robin Jenkin, NVIDIA Corporation (United States)<br> </span> <span class="session_time">2:00 PM – 3:00 PM</span> <br> <span class="room">Cyril Magnin I/II/III<br> </span> <span></span> <p class="session_notes">Images optimized for High Dynamic Range (HDR) displays have brighter highlights and more detailed shadows, resulting in an increased sense of realism and greater impact. However, a major issue with HDR content is the lack of consistency in appearance across different devices and viewing environments. There are several reasons, including varying capabilities of HDR displays and the different tone mapping methods implemented across software and platforms. Consequently, HDR content authors can neither control nor predict how their images will appear in other apps.</p> <span></span> <p class="session_notes">We present a flexible system that provides consistent and adaptive display of HDR images. Conceptually, the method combines both SDR and HDR renditions within a single image and interpolates between the two dynamically at display time. We compute a Gain Map that represents the difference between the two renditions. In the file, we store a Base rendition (either SDR or HDR), the Gain Map, and some associated metadata. At display time, we combine the Base image with a scaled version of the Gain Map, where the scale factor depends on the image metadata, the HDR capacity of the display, and the viewing environment. </p> <br> <span></span> <p class="session_notes"> </p> <span class="author_string"><strong>Eric Chan, </strong>Fellow, Adobe Inc. (United States)<span class="author_string"></span></span> <p>&nbsp;</p> <span></span> <p class="session_notes">Eric Chan is a Fellow at Adobe, where he develops software for editing photographs. Current projects include Photoshop, Lightroom, Camera Raw, and Digital Negative (DNG). When not writing software, Chan enjoys spending time at his other keyboard, the piano. He is an enthusiastic nature photographer and often combines his photo activities with travel and hiking.</p> <br> <span class="author_string"><strong>Paul M. Hubel, </strong>director of Image Quality in Software Engineering, Apple Inc. (United States)<span class="author_string"></span></span> <p>&nbsp;</p> <span></span> <p class="session_notes">Paul M. Hubel is director of Image Quality in Software Engineering at Apple. He has worked on computational photography and image quality of photographic systems for many years on all aspects of the imaging chain, particularly for iPhone. He trained in optical engineering at University of Rochester, Oxford University, and MIT, and has more than 50 patents on color imaging and camera technology. Hubel is active on the ISO-TC42 committee Digital Photography, where this work is under discussion, and is currently a VP on the IS&amp;T Board. 
Outside work he enjoys photography, travel, cycling, and coffee roasting, and plays trumpet in several Bay Area ensembles.</p> </div> <br> <p class="event_time">3:00 – 3:30 PM Coffee Break</p> <p class="session_title">Objective Quality Assessment (T3)</p> <span class="chair_label">Session Chair: </span> <span class="chair">Peter Burns, Rochester Institute of Technology (United States)<br> </span> <span class="session_time">3:30 – 5:30 PM</span> <br> <span class="room">Cyril Magnin III<br> </span> <p class="presentation_time" style="text-align:left;">3:30<a name="IQSP-305"></a><span style="float: right;">IQSP-305</span> <br> <span class="presentation_title" final_id="IQSP-305" onclick="toggle_me()" style="cursor: pointer;">Another look at SSIM image quality metric, </span><span class="author_string" final_id="IQSP-305" onclick="toggle_me()" style="cursor: pointer;">Yuriy Reznik</span><span class="author_string" final_id="IQSP-305" onclick="toggle_me()" style="cursor: pointer;">, Brightcove, Inc. (United States)</span><span class="abstract_link" final_id="IQSP-305" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="IQSP-305" id="abstract-IQSP-305" onclick="toggle_me()" style="display:none; cursor:pointer;">We review the design of the SSIM quality metric and offer an alternative model of SSIM computation, utilizing subband decomposition and identical distance measures in each subband. We show that this model performs very close to the original and offers many advantages from a methodological standpoint. It immediately brings several possible explanations of why SSIM is effective. It also suggests a simple strategy for band noise allocation optimizing SSIM scores. This strategy may aid the design of encoders or pre-processing filters for video coding. Finally, this model leads to more straightforward mathematical connections between SSIM, MSE, and SNR metrics, improving previously known results.</p> <p>&nbsp;</p> <p class="presentation_time" style="text-align:left;">3:50<a name="IQSP-306"></a><span style="float: right;">IQSP-306</span> <br> <span class="presentation_title" final_id="IQSP-306" onclick="toggle_me()" style="cursor: pointer;">What are we looking at? An investigation on the use of deep learning models for image quality assessment, </span><span class="author_string" final_id="IQSP-306" onclick="toggle_me()" style="cursor: pointer;">Ha Thu Nguyen and </span><span class="author_string" final_id="IQSP-306" onclick="toggle_me()" style="cursor: pointer;">Seyed Ali Amirshahi</span><span class="author_string" final_id="IQSP-306" onclick="toggle_me()" style="cursor: pointer;">, Norwegian University of Science and Technology (Norway)</span><span class="abstract_link" final_id="IQSP-306" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="IQSP-306" id="abstract-IQSP-306" onclick="toggle_me()" style="display:none; cursor:pointer;">In recent years, several different Image Quality Metrics (IQMs) have been introduced which focus on comparing the feature maps extracted from different pre-trained deep learning models [1-3]. While such objective IQMs have shown a high correlation with subjective scores, little attention has been paid to how they could be used to better understand the Human Visual System (HVS) and how observers evaluate the quality of images. In this study, by using different pre-trained Convolutional Neural Network (CNN) models, we identify the most relevant features in Image Quality Assessment (IQA). 
By visualizing these feature maps, we try to gain a better understanding of which features play a dominant role when evaluating the quality of images. Experimental results on four benchmark datasets show that the most important feature maps represent repeated textures such as stripes or checkers, and feature maps linked to the colors blue or orange also play a crucial role. Additionally, when it comes to calculating the quality of an image based on a comparison of different feature maps, a higher accuracy can be reached when only the most relevant feature maps are used in calculating the image quality instead of using all the extracted feature maps from a CNN model. [1] Amirshahi, Seyed Ali, Marius Pedersen, and Stella X. Yu. "Image quality assessment by comparing CNN features between images." Journal of Imaging Science and Technology 60.6 (2016): 60410-1. [2] Amirshahi, Seyed Ali, Marius Pedersen, and Azeddine Beghdadi. "Reviving traditional image quality metrics using CNNs." Color and Imaging Conference. Vol. 2018. No. 1. Society for Imaging Science and Technology, 2018. [3] Gao, Fei, et al. "DeepSim: Deep similarity for image quality assessment." Neurocomputing 257 (2017): 104-114.</p> <p>&nbsp;</p> <p class="presentation_time" style="text-align:left;">4:10<a name="IQSP-307"></a><span style="float: right;">IQSP-307</span> <br> <span class="presentation_title" final_id="IQSP-307" onclick="toggle_me()" style="cursor: pointer;">A framework for the metrification of input image quality in deep networks, </span><span class="author_string" final_id="IQSP-307" onclick="toggle_me()" style="cursor: pointer;">Alexandra Psarrou and </span><span class="author_string" final_id="IQSP-307" onclick="toggle_me()" style="cursor: pointer;">Sophie Triantaphillidou</span><span class="author_string" final_id="IQSP-307" onclick="toggle_me()" style="cursor: pointer;">, University of Westminster (United Kingdom)</span><span class="abstract_link" final_id="IQSP-307" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="IQSP-307" id="abstract-IQSP-307" onclick="toggle_me()" style="display:none; cursor:pointer;">Deep Neural Networks (DNNs) are critical for real-time imaging applications including autonomous vehicles. DNNs are often trained and validated with images that originate from a limited number of cameras, each of which has its own hardware and image signal processing (ISP) characteristics. However, in most real-time embedded systems, the input images come from a variety of cameras with different ISP pipelines, and often include perturbations due to a variety of scene conditions. Data augmentation methods are commonly exploited to enhance the robustness of such systems. Alternatively, methods are employed to detect input images that are unfamiliar to the trained networks, including out-of-distribution detection. Despite these efforts, DNNs remain widely deployed systems with operational boundaries that cannot be easily defined. One reason is that, while training and benchmark image datasets include samples with a variety of perturbations, there is a lack of research in the metrification of input image quality suitable to DNNs and of a universal method to relate quality to DNN performance using meaningful quality metrics. 
This paper addresses this lack of metrification specific to DNN systems and introduces a framework that uses systematic modification of image quality attributes to relate input image quality to DNN performance.</p> <p>&nbsp;</p> <p class="presentation_time" style="text-align:left;">4:30<a name="IQSP-308"></a><span style="float: right;">IQSP-308</span> <br> <span class="presentation_title" final_id="IQSP-308" onclick="toggle_me()" style="cursor: pointer;">Investigating pretrained self-supervised vision transformers for reference-based quality assessment, </span><span class="author_string" final_id="IQSP-308" onclick="toggle_me()" style="cursor: pointer;">Kanjar De</span><span class="author_string" final_id="IQSP-308" onclick="toggle_me()" style="cursor: pointer;">, Lulea University of Technology (Sweden)</span><span class="abstract_link" final_id="IQSP-308" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="IQSP-308" id="abstract-IQSP-308" onclick="toggle_me()" style="display:none; cursor:pointer;">Reference-based image quality assessment techniques use information from an undistorted reference image of the same scene to estimate the quality of a distorted target image. The main challenge in designing algorithms for quality assessment is to incorporate the behavior of the human visual system into the algorithms. The advent of deep learning (DL) techniques has garnered sufficient interest among researchers in the field of image quality assessment. The common limitation of applying deep learning for image quality assessment is its dependence on a large amount of subjective training data. Recent advances in the field of patch-based self-supervised vision transformers have achieved remarkable results for tasks like object segmentation, copy detection, and other downstream computer vision tasks. In this paper, we study how the distance between pretrained self-supervised vision transformer features applied to pristine and distorted images relates to the human visual system. 
Experiments carried out on three publicly available image quality databases (namely KADID-10K, TID2013, and MDID2016) have yielded promising results that can be further exploited to design perceptual reference-based image quality assessment methods.</p> <p>&nbsp;</p> <p class="presentation_time" style="text-align:left;">4:50<a name="IQSP-309"></a><span style="float: right;">IQSP-309</span> <br> <span class="presentation_title" final_id="IQSP-309" onclick="toggle_me()" style="cursor: pointer;">Evaluation of image quality metrics designed for DRI tasks with automotive cameras, </span><span class="author_string" final_id="IQSP-309" onclick="toggle_me()" style="cursor: pointer;">Valentine Klein, </span><span class="author_string" final_id="IQSP-309" onclick="toggle_me()" style="cursor: pointer;">Theophanis Eleftheriou, </span><span class="author_string" final_id="IQSP-309" onclick="toggle_me()" style="cursor: pointer;">Yiqi LI, </span><span class="author_string" final_id="IQSP-309" onclick="toggle_me()" style="cursor: pointer;">Emilie Baudin, </span><span class="author_string" final_id="IQSP-309" onclick="toggle_me()" style="cursor: pointer;">Claudio Greco, </span><span class="author_string" final_id="IQSP-309" onclick="toggle_me()" style="cursor: pointer;">Laurent Chanas, and </span><span class="author_string" final_id="IQSP-309" onclick="toggle_me()" style="cursor: pointer;">Frédéric Guichard</span><span class="author_string" final_id="IQSP-309" onclick="toggle_me()" style="cursor: pointer;">, DXOMARK (France)</span><span class="abstract_link" final_id="IQSP-309" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="IQSP-309" id="abstract-IQSP-309" onclick="toggle_me()" style="display:none; cursor:pointer;">Driving assistance is increasingly used in new car models. Most driving assistance systems are based on automotive cameras and computer vision. Computer Vision, regardless of the underlying algorithms and technology, requires the images to have good image quality, defined according to the task. This notion of good image quality is still to be defined in the case of computer vision as it has very different criteria than human vision: humans have a better contrast detection ability than image chains. The aim of this article is to compare three different metrics designed for detection of objects with computer vision: the Contrast Detection Probability (CDP) [1, 2, 3, 4], the Contrast Signal to Noise Ratio (CSNR) [5] and the Frequency of Correct Resolution (FCR) [6]. For this purpose, the computer vision task of reading the characters on a license plate will be used as a benchmark. The objective is to check the correlation between the objective metric and the ability of a neural network to perform this task. 
Thus, a protocol to test these metrics and compare them to the output of the neural network has been designed and the pros and cons of each of these three metrics have been noted.</p> <p>&nbsp;</p> <p class="presentation_time" style="text-align:left;">5:10<a name="IQSP-310"></a><span style="float: right;">IQSP-310</span> <br> <span class="presentation_title" final_id="IQSP-310" onclick="toggle_me()" style="cursor: pointer;">Towards image-computable visual text quality metric with deep neural network, </span><span class="author_string" final_id="IQSP-310" onclick="toggle_me()" style="cursor: pointer;">Ling-Qi Zhang<sup>1,</sup><sup>2</sup>, </span><span class="author_string" final_id="IQSP-310" onclick="toggle_me()" style="cursor: pointer;">Minjung Kim<sup>1</sup>, </span><span class="author_string" final_id="IQSP-310" onclick="toggle_me()" style="cursor: pointer;">James Hillis<sup>1</sup>, and </span><span class="author_string" final_id="IQSP-310" onclick="toggle_me()" style="cursor: pointer;">Trisha Lian<sup>1</sup></span><span class="author_string" final_id="IQSP-310" onclick="toggle_me()" style="cursor: pointer;">; <sup>1</sup>Meta Reality Labs and <sup>2</sup>University of Pennsylvania (United States)</span><span class="abstract_link" final_id="IQSP-310" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="IQSP-310" id="abstract-IQSP-310" onclick="toggle_me()" style="display:none; cursor:pointer;">Image quality metrics have become invaluable tools for image processing and display system development. These metrics are typically developed for and tested on images and videos of natural content. Text, on the other hand, has unique features and supports a distinct visual function: reading. It is therefore not clear if these image quality metrics are efficient or optimal as measures of text quality. Here, we developed a domain-specific image quality metric for text and compared its performance against quality metrics developed for natural images. To develop our metric, we first trained a deep neural network to perform text classification on a data set of distorted letter images. We then compute the responses of internal layers of the network to uncorrupted and corrupted images of text, respectively. We used the cosine dissimilarity between the responses as a measure of text quality. Preliminary results indicate that both our model and more established quality metrics (e.g., SSIM) are able to predict general trends in participants’ text quality ratings. In some cases, our model is able to outperform SSIM. 
We further developed our model to predict response data in a two-alternative forced choice experiment, on which only our model achieved very high accuracy.</p> <p>&nbsp;</p> <br> <br> <p class="event_time">5:30 – 7:00 PM EI 2023 Symposium Demonstration Session (in the Cyril Magnin Foyer)</p> <p class="date">Wednesday 18 January 2023</p> <p class="session_title">System Performance (W1)</p> <span class="chair_label">Session Chair: </span> <span class="chair">Jonathan Phillips, Imatest, LLC (United States)<br> </span> <span class="session_time">8:50 – 10:10 AM</span> <br> <span class="room">Cyril Magnin III<br> </span> <p class="presentation_time" style="text-align:left;">8:50<a name="IQSP-311"></a><span style="float: right;">IQSP-311</span> <br> <span class="presentation_title" final_id="IQSP-311" onclick="toggle_me()" style="cursor: pointer;">A tool for deriving camera spatial frequency response from natural scenes (NS-SFR), </span><span class="author_string" final_id="IQSP-311" onclick="toggle_me()" style="cursor: pointer;">Oliver van Zwanenberg<sup>1</sup>, </span><span class="author_string" final_id="IQSP-311" onclick="toggle_me()" style="cursor: pointer;">Sophie Triantaphillidou<sup>1</sup>, and </span><span class="author_string" final_id="IQSP-311" onclick="toggle_me()" style="cursor: pointer;">Robin B. Jenkin<sup>1,</sup><sup>2</sup></span><span class="author_string" final_id="IQSP-311" onclick="toggle_me()" style="cursor: pointer;">; <sup>1</sup>University of Westminster (United Kingdom) and <sup>2</sup>NVIDIA Corporation (United States)</span><span class="abstract_link" final_id="IQSP-311" onclick="toggle_me()"> [view abstract] </span></p> <p class="abstract" final_id="IQSP-311" id="abstract-IQSP-311" onclick="toggle_me()" style="display:none; cursor:pointer;">Recent research on digital camera performance evaluation introduced the Natural Scene Spatial Frequency Response (NS-SFR) framework, shown to provide a comparable measure to the ISO12233 edge SFR (e-SFR) but derived outside laboratory conditions. The framework extracts step-edges captured from pictorial natural scenes to evaluate the camera SFR. It is in two parts. The first utilizes the ISO12233 slanted-edge algorithm to produce an ‘envelope’ of NS-SFRs. The second estimates the system e-SFR from this NS-SFR data. One drawback of this proposed methodology has been the computation time. The process was not optimized, as it first derived NS-SFRs from all suitable step-edges and then further validated and statistically treated the results to estimate the e-SFR. This paper presents changes to the framework processes, aiming to optimize the computation time so that it is practical for real-world implementation. The developments include an improved framework structure, a pixel-stretching filter alternative, and the capability to utilize Graphics Processing Unit (GPU) acceleration. In addition, the methodology was updated to utilize the latest e-SFR algorithm implementation. The resulting code has been incorporated into a self-executable user interface prototype, available on GitHub. 
9:10  IQSP-312
Influence of the light source on the image sensor characterization according to EMVA 1288, Ganesh D. Kubina, Max Gäde, and Uwe Artmann, Image Engineering GmbH & Co KG (Germany)
Abstract: Due to the increasing demand for machine vision in a variety of scenarios, it is necessary to know the capability of the hardware before deploying it. The EMVA 1288 standard by the European Machine Vision Association provides a basis for comparing camera performance through a characterization of the image sensor, using a monochromatic light source. This paper investigates the influence the light source has on the measurement results: which parameters depend on it, which do not, and whether there is any benefit to using a broadband light source. To answer these questions, a series of measurement runs using six different illuminants was performed with the same camera. The illuminants included monochromatic blue, green, and red light as well as three white spectra (CIE E, CIE D65, and white LED). The results show that the influence of the light source is limited to the measured quantum efficiency of the camera and related parameters. As a consequence, using a non-monochromatic light source for the measurements can be an option, as it provides better insight into use-case-specific performance and improves comparability.
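The light-source-dependent quantities come out of the EMVA 1288 photon-transfer evaluation: system gain from the photon-transfer curve and quantum efficiency from the responsivity. A minimal sketch of that calculation is shown below; the function name and the exposure-series inputs are illustrative, not part of the paper.

# Sketch of the EMVA 1288 photon-transfer evaluation for one illuminant.
import numpy as np

def emva1288_gain_and_qe(mean_grey, var_grey, mean_dark, var_dark, photons):
    """All inputs are 1-D arrays over the exposure series.
    mean/var are per-pixel temporal statistics in digital numbers (DN);
    photons is the mean number of photons per pixel and exposure."""
    signal = np.asarray(mean_grey) - np.asarray(mean_dark)   # DN
    noise = np.asarray(var_grey) - np.asarray(var_dark)      # DN^2
    K = np.polyfit(signal, noise, 1)[0]        # system gain, DN per electron
    R = np.polyfit(np.asarray(photons), signal, 1)[0]  # responsivity, DN/photon
    eta = R / K                                # quantum efficiency, e-/photon
    return K, R, eta

Because the responsivity depends on the spectrum of the photons supplied, the quantum efficiency reported by such a run is the quantity most directly tied to the choice of illuminant, consistent with the paper's finding.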
9:30  IQSP-313
Managing deviant data in spatial frequency response (SFR) measurement by outlier rejection, Peter Burns¹ and Don Williams²; ¹Burns Digital Imaging and ²Image Science Associates (United States)
Abstract: The edge-based Spatial Frequency Response (e-SFR) method was first developed for evaluating camera image resolution and sharpness, and was described in the first version of the ISO 12233 standard. Since then, the method has been applied in a wide range of applications, including medical, security, archiving, and document processing. With this broad application, however, several of the method's assumptions are no longer closely followed, which has led to improvements aimed at broadening its scope, for example to lenses with spatial distortion. We can think of the evaluation of image quality parameters as an estimation problem based on the gathered data, often digital images. In this paper, we address the mitigation of measurement error introduced when the analysis is applied to low-exposure (and therefore noisy) captures and to small analysis regions. We consider the origins of both bias and variation in the resulting SFR measurement and present practical ways to reduce them. We describe the screening of outlier edge-location values as a method for improved edge detection, which in turn reduces the negative bias in the resulting SFR.
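One way to screen edge-location outliers is a robust residual test against the fitted edge line before the locations are reused in the ESF projection. The sketch below uses a MAD-based threshold; the threshold factor and the exact rule are illustrative choices, not necessarily the authors' screening method.

# Sketch: MAD-based screening of per-row edge-location estimates.
import numpy as np

def screen_edge_locations(rows, locations, k: float = 3.5):
    """rows, locations: 1-D arrays of row indices and per-row edge positions.
    Returns a keep-mask and the refitted (slope, intercept)."""
    rows = np.asarray(rows, dtype=float)
    locations = np.asarray(locations, dtype=float)
    slope, intercept = np.polyfit(rows, locations, 1)
    residuals = locations - (slope * rows + intercept)
    mad = np.median(np.abs(residuals - np.median(residuals)))
    keep = np.abs(residuals) <= k * 1.4826 * max(mad, 1e-9)
    slope, intercept = np.polyfit(rows[keep], locations[keep], 1)
    return keep, (slope, intercept)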
9:50  IQSP-314
Optimization of ISP parameters for low light conditions using a non-linear reference based approach, Shubham Ravindra Alai¹, Radhesh Bhat¹, and Ajay Basarur²; ¹PathPartner Technology - Member of KPIT Group (India) and ²presenter only (United States)
Abstract: An image signal processor (ISP) transforms a sensor's raw image into an RGB image for use in computer or human vision applications. An ISP is composed of functional blocks, each contributing in its own way to making the image suitable for the target application, and each block exposes several hyperparameters that must be tuned (usually manually, by experts, in an iterative manner) to reach the target image quality. Tuning becomes especially challenging and iterative in low to very low light, where the amount of detail preserved by the sensor is limited and the ISP parameters must balance detail recovery, noise, sharpness, contrast, and so on. Extracting the maximum information from the image usually requires raising the ISO gain, which in turn degrades noise and color accuracy. The number of ISP parameters is also very large, making it impractical to consider all of them when searching for the best settings in such conditions. To tackle the challenges of manual tuning, especially for low light, we implemented an automatic hyperparameter optimization model that tunes low-lux images so that they are perceptually equivalent to high-lux images. Image quality validation experiments were carried out under challenging low-light conditions using Qualcomm's Spectra ISP simulator with a 13 MP OV sensor, and the automatically tuned image quality was compared with manually tuned image quality for human vision use cases. The experimental results show that, using evolutionary algorithms and local optimization and without relying on any KPI metrics, the ISP parameters can be optimized so that a low-lux image, or an image captured with a different ISP configuration (the test image), is perceptually brought to the level of a high-lux or well-tuned reference image.
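The overall loop, reference image plus a black-box ISP plus an evolutionary search, can be sketched as follows. Here run_isp() and perceptual_distance() are hypothetical placeholders for the ISP simulator and a full-reference perceptual metric, and the (mu+lambda) strategy, mutation scale, and bounds are illustrative, not the authors' exact algorithm.

# Minimal sketch of reference-based ISP parameter tuning.
import numpy as np

def tune_isp(raw, reference, run_isp, perceptual_distance,
             bounds, generations=50, pop=16, elite=4, seed=0):
    """bounds: array of (low, high) limits per ISP parameter."""
    rng = np.random.default_rng(seed)
    lo, hi = np.asarray(bounds, dtype=float).T
    parents = rng.uniform(lo, hi, size=(elite, len(lo)))
    for _ in range(generations):
        # Mutate each parent into pop/elite children within the bounds.
        children = np.clip(
            np.repeat(parents, pop // elite, axis=0)
            + rng.normal(0.0, 0.05 * (hi - lo), size=(pop, len(lo))),
            lo, hi)
        population = np.vstack([parents, children])
        scores = np.array([perceptual_distance(run_isp(raw, p), reference)
                           for p in population])
        parents = population[np.argsort(scores)[:elite]]  # keep the best
    return parents[0]

A local optimizer (e.g., a coordinate or simplex search) could then refine the best candidate, in the spirit of the combined evolutionary-plus-local approach the abstract describes.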
10:00 AM – 3:30 PM  Industry Exhibition - Wednesday (in the Cyril Magnin Foyer)
10:20 – 10:50 AM  Coffee Break

Mobile and Camera Quality Assessment (W2)
Session Chair: Elaine Jin, Rivian Automotive, Inc. (United States)
10:50 AM – 12:30 PM
Cyril Magnin III

10:50  IQSP-315
Image quality performance of CMOS image sensor equipped with Nano Prism, Sungho Cha, Samsung Electronics (Republic of Korea)
Abstract: Smartphones with 100-million-pixel sensors are already on the market, and mobile camera modules with 200 million pixels or more are expected to follow. Fitting more pixels into a limited space to build such high-resolution sensors requires shrinking the pixel size; sensors with 0.64 µm pixels are currently available, and even smaller pixels are expected. In terms of image quality, the smaller the pixel, the less light it receives, so image quality deteriorates with respect to noise and crosstalk. To overcome this limitation, various high-sensitivity sensors are being developed, and mounting a Nano Prism is advantageous in their development. In this paper, we present the image quality performance of a CMOS image sensor equipped with Nano Prism.

11:10  IQSP-316
Noise quality estimation on portraits in realistic controlled scenarios, Nicolas Chahine¹,², Samuel S. Santos³, Sofiene Lahouar¹, Ana-Stefania Calarasanu¹, Sira Ferradans¹, Benoit Pochon¹, and Frédéric Guichard¹; ¹DXOMARK Image Labs, ²INRIA, and ³Parrot (France)
Abstract: The wide use of cameras by the public has raised interest in image quality evaluation and ranking. Current cameras embed complex processing pipelines that adapt strongly to the scene content, for instance through advanced noise reduction or local adjustments on faces. However, current image quality assessment methods are based on static geometric charts, which are not representative of common camera usage that mostly targets portraits. Moreover, on non-synthetic content the most relevant attributes, such as detail preservation or noisiness, are often difficult to measure directly. To overcome this, we propose to combine classical measurements with machine-learning-based methods: we reproduce in the lab, under controlled conditions, realistic content that triggers these complex processing pipelines, which allows rigorous quality assessment, and machine-learning models then reproduce the previously annotated perceptual quality. In this paper we focus on noise quality evaluation and test two setups, close-up and distant portraits. These setups provide flexibility in scene capture conditions and, above all, allow evaluation across the full range of camera quality, from high-quality DSLRs to poor-quality video-conference cameras. Our numerical results show the relevance of our solution compared to geometric charts and the importance of adapting to realistic content.
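A minimal sketch of the "classical features plus ML" idea follows: simple noise statistics measured on face crops are regressed onto previously collected perceptual annotations. The feature set, the face_crops/annotations inputs, and the regressor are illustrative assumptions; the paper does not specify this exact pipeline.

# Sketch: regress perceptual noise scores from simple crop-level features.
import numpy as np
from scipy import ndimage
from sklearn.ensemble import GradientBoostingRegressor

def noise_features(patch: np.ndarray):
    """patch: 2-D luminance crop from a smooth skin region of a portrait."""
    highpass = patch - ndimage.uniform_filter(patch, size=5)
    return [highpass.std(),                        # broadband noise level
            np.abs(np.diff(patch, axis=0)).mean(), # vertical graininess
            np.abs(np.diff(patch, axis=1)).mean()] # horizontal graininess

def fit_noise_quality_model(face_crops, annotations):
    """face_crops: list of 2-D arrays; annotations: perceptual scores."""
    X = np.array([noise_features(p) for p in face_crops])
    return GradientBoostingRegressor().fit(X, np.asarray(annotations))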
11:30  IQSP-317
VCX – Version 2023 – The latest transparent and objective mobile phone test scheme, Uwe Artmann¹ and Anthony L. Orchard²; ¹Image Engineering GmbH & Co KG (Germany) and ²Intel Corporation (United States)
Abstract: VCX, or Valued Camera eXperience, is a nonprofit organization dedicated to the objective and transparent evaluation of mobile phone cameras. Its members continuously develop a test scheme that provides an objective score for camera performance. Every device is tested for a variety of image quality factors, which are typically based on existing standards. This paper presents the latest developments in the newly released version 2023 and the process behind it. New metrics include extended tests on video dynamics, AE, and AWB, dedicated tests for ultra-wide modules, and adjustments to the metric system based on a large-scale subjective study.

11:50  IQSP-318
VCX – A transparent and objective test scheme for webcams, Uwe Artmann¹ and Anthony L. Orchard²; ¹Image Engineering GmbH & Co KG (Germany) and ²Intel Corporation (United States)
Abstract: VCX, or Valued Camera eXperience, is a nonprofit organization dedicated to the objective and transparent evaluation of consumer camera devices such as mobile phones and webcams. Its members continuously develop a test scheme that provides an objective score for camera performance. We present the newly developed test scheme for webcams used in video conference systems. It covers many aspects of camera performance, including global image quality factors such as AE, AWB, and color, and local image quality factors such as resolution, texture, and sharpening. The test procedure uses state-of-the-art algorithms and covers more challenging situations and scenes than existing test schemes.

12:10  IQSP-319
Improvement of the flare evaluation and applications in NIR, Elodie Souksava, Emilie Baudin, Claudio Greco, Hoang-Phi Nguyen, Laurent Chanas, and Frédéric Guichard, DxOMark Image Labs (France)
Abstract: Near-infrared (NIR) light sources have become increasingly present in daily life, which has led to a growing number of cameras designed to image in the NIR spectrum (sometimes in addition to the visible) in the automotive, mobile, and surveillance sectors. Camera evaluation metrics, however, are still mainly focused on sensors operating in visible light. The goal of this article is to extend our existing flare setup and objective flare metric to quantify NIR flare for different cameras and to evaluate the performance of several NIR filters. We also compare results under both visible and NIR lighting for different types of devices. Moreover, we propose a new method to measure the ISO speed rating in the visible spectrum (originally defined in ISO 12232) and an equivalent ISO for the NIR spectrum with our flare setup.
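For reference, the saturation-based speed defined in ISO 12232, which the paper extends to an NIR-equivalent ISO, is a one-line calculation once the saturating exposure is known. How that exposure is obtained from the flare setup is not reproduced here; the example value is hypothetical.

# ISO 12232 saturation-based speed from the saturating focal-plane exposure.
def saturation_based_iso(h_sat_lux_seconds: float) -> float:
    """S_sat = 78 / H_sat, with H_sat in lux-seconds."""
    return 78.0 / h_sat_lux_seconds

# Example: a sensor saturating at 0.52 lux-seconds rates about ISO 150.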
12:30 – 2:00 PM  Lunch

Wednesday 18 January PLENARY: Bringing Vision Science to Electronic Imaging: The Pyramid of Visibility
Session Chair: Andreas Savakis, Rochester Institute of Technology (United States)
2:00 PM – 3:00 PM
Cyril Magnin I/II/III

Electronic imaging depends fundamentally on the capabilities and limitations of human vision. The challenge for the vision scientist is to describe these limitations to the engineer in a comprehensive, computable, and elegant formulation. Primary among these limitations are the visibility of variations in light intensity over space and time, of variations in color over space and time, and of all of these patterns with position in the visual field. Lastly, we must describe how all these sensitivities vary with adapting light level. We have recently developed a structural description of human visual sensitivity, which we call the Pyramid of Visibility, that accomplishes this synthesis. This talk shows how this structure accommodates all the dimensions described above and how it can be used to solve a wide variety of problems in display engineering.
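As a hedged sketch of the published form of this model (Watson and Ahumada, 2016): away from its peak, log contrast sensitivity is approximated as a linear function of spatial frequency, temporal frequency, and log adapting luminance. The coefficients below are placeholders, not fitted values, and the color and eccentricity dimensions mentioned in the talk are omitted.

# Pyramid-of-visibility style linear approximation (placeholder coefficients).
import numpy as np

def log_sensitivity(spatial_cpd, temporal_hz, luminance_cdm2,
                    c0=2.0, c_f=-0.05, c_w=-0.02, c_l=0.4):
    """log10 contrast sensitivity under the linear 'pyramid' approximation."""
    return (c0 + c_f * spatial_cpd + c_w * temporal_hz
            + c_l * np.log10(luminance_cdm2))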
Andrew B. Watson, chief vision scientist, Apple Inc. (United States)

Andrew Watson is Chief Vision Scientist at Apple, where he leads the application of vision science to technologies, applications, and displays. His research focuses on computational models of early vision. He is the author of more than 100 scientific papers and 8 patents. He has 21,180 citations and an h-index of 63. Watson founded the Journal of Vision and served as editor-in-chief 2001-2013 and 2018-2022. He has received numerous awards, including the Presidential Rank Award from the President of the United States.

3:00 – 3:30 PM  Coffee Break
5:30 – 7:00 PM  EI 2023 Symposium Interactive (Poster) Paper Session (in the Cyril Magnin Foyer)
5:30 – 7:00 PM  EI 2023 Meet the Future: A Showcase of Student and Young Professionals Research (in the Cyril Magnin Foyer)
Navigation" class="col-secondary cs-right d-none"> <div ID="WTZone8_Page1" class="WTZone "> </div> </div> </div> </div> <a class="backToTop" href="#PageTop">Back to Top</a> <footer id="ft" class="footer ClearFix"> <div class="footer-content"> <div class="container" data-label="Footer 1"> <div ID="WTZone9_Page1" class="WTZone "> <div id="ste_container_ciFooterContent1" class="ContentItemContainer"> <div id="ste_container_NewContentHtml2" class="ContentItemContainer"><div class="footer-nav"> <div class="footer-col"> <ul> <li><a href="https://www.imaging.org/">IMAGING.ORG</a></li> <li><a href="/IST/Conferences/Events_Overview.aspx">Events</a></li> <li><a href="/IST/Publications/Publications_Overview.aspx" class="">Publications</a></li> <li><a href="/IST/Standards/TC42.aspx">Standards</a></li> </ul> </div> <div class="footer-col"> <ul> <li><a href="/IST/Resources/Resources_Home.aspx">RESOURCES</a></li> <li><a href="/IST/Resources/CareerCenter.aspx">Careers</a></li> <li><a href="/IST/Policies/Policies.aspx">Policies</a></li> </ul> </div> <div class="footer-col"> <ul> <li><a href="/IST/About/About.aspx">ABOUT US</a></li> <li><a href="/IST/Membership/Individual_Membership.aspx">Membership</a></li> <li><a href="/IST/About/Donations.aspx">Donate</a></li> <li><a href="/IST/About/About.aspx">Contact</a></li> </ul> </div> </div></div><div id="ste_container_FooterContent" class="ContentItemContainer"><div class="FooterTop"><div class="FooterLogo"><img src="/images/75th%20logo%20alt%20blue%20white%20bkgrnd.png" alt=""> </div> <div class="FooterSocial"> <div class="FooterSocialText"> <p>Stay Connected!</p> </div> <div class="FooterSocialImg"><a href="https://www.linkedin.com/company/society-for-imaging-science-and-technology-is&amp;t-"><img src="/images/Icons/linkedin36blue.png" alt="" style="margin-right: 10px;"></a><a href="https://twitter.com/ImagingOrg"><img src="/images/Icons/twitter36blue.png" alt="" style=""></a></div> </div></div></div><div id="ste_container_NewContentHtml1" class="ContentItemContainer"><div class="FooterBottom"><p style="text-align: center;">© Copyright 2023 Society for Imaging Sciences and Technology. 
All Rights Reserved.</p></div></div><div class="ContentRecordPageButtonPanel"> </div> </div> </div> </div> </div> <div class="footer-nav-copyright"> <div class="container" role="navigation"> <div class="footer-copyright" data-label= "Footer 2"> <div ID="WTZone10_Page1" class="WTZone iPartsDisplayInlineBlock"> </div> </div> </div> </div> </footer> </div> <!--Jscript from Page.ResgisterStartupScript extention is loaded here --> <Div><script type="text/javascript">Sys.Application.add_load(function () {{ MasterPageBase_Init(); }});</script> <script type="text/javascript">Sys.Application.add_load(function() { { BreadCrumb_load('80409b89-ae6d-45a9-a9d4-96d522ff2047'); } }); </script> </Div><input name="ctl01$TemplateScripts$timeoutsoonmsg" type="hidden" id="timeoutsoonmsg" value="PGgyPllvdSBhcmUgYWJvdXQgdG8gYmUgc2lnbmVkIG91dDwvaDI+DQo8cD5Zb3Ugd2lsbCBiZSBzaWduZWQgb3V0IGluIDxzdHJvbmc+W1NlY29uZHNSZW1haW5pbmddPC9zdHJvbmc+IHNlY29uZHMgZHVlIHRvIGluYWN0aXZpdHkuIFlvdXIgY2hhbmdlcyB3aWxsIG5vdCBiZSBzYXZlZC4gVG8gY29udGludWUgd29ya2luZyBvbiB0aGUgd2Vic2l0ZSwgY2xpY2sgIlN0YXkgU2lnbmVkIEluIiBiZWxvdy48L3A+" /><input name="ctl01$TemplateScripts$timeoutsoonstaysignintxt" type="hidden" id="timeoutsoonstaysignintxt" value="U3RheSBTaWduZWQgSW4=" /><input name="ctl01$TemplateScripts$timeoutsoonlogouttxt" type="hidden" id="timeoutsoonlogouttxt" value="U2lnbiBPdXQ=" /><input name="ctl01$TemplateScripts$stayLoggedInURL" type="hidden" id="stayLoggedInURL" /><input name="ctl01$TemplateScripts$logoutUrl" type="hidden" id="logoutUrl" value="aHR0cHM6Ly93d3cuaW1hZ2luZy5vcmcvYXNpY29tbW9uL2NvbnRyb2xzL3NoYXJlZC9mb3Jtc2F1dGhlbnRpY2F0aW9uL2xvZ2luLmFzcHg/U2Vzc2lvblRpbWVvdXQ9MSZSZXR1cm5Vcmw9JTJmSVNUJTJmSVNUJTJmQ29uZmVyZW5jZXMlMmZFSSUyZkVJMjAyMyUyZkNvbmZlcmVuY2UlMmZDX0lRU1AuYXNweCUzZg==" /> <!-- Bootstrap Modal --> <div id="BootstrapModal" class="modal fade" tabindex="-1" role="dialog" aria-label="Modal" aria-hidden="true"> <div id="BootstrapDocument" class="modal-dialog modal-xl" role="document"> <div class="modal-content"> <div class="modal-header"> <button type="button" class="close" data-dismiss="modal" aria-label="Close"> <span aria-hidden="true">&times;</span> </button> </div> <div class="modal-body p-0 m-0"> <iframe id="ContentFrame" class="modal-content-iframe" width="100%" height="100px" frameborder="0"></iframe> </div> </div> </div> </div> <div id="ctl01_RadAjaxManager1SU"> <span id="ctl01_RadAjaxManager1" style="display:none;"></span> </div><div id="ctl01_WindowManager1" style="display:none;"> <div id="ctl01_GenericWindow" style="display:none;"> <div id="ctl01_GenericWindow_C" style="display:none;"> </div><input id="ctl01_GenericWindow_ClientState" name="ctl01_GenericWindow_ClientState" type="hidden" /> </div><div id="ctl01_ObjectBrowser" style="display:none;"> <div id="ctl01_ObjectBrowser_C" style="display:none;"> </div><input id="ctl01_ObjectBrowser_ClientState" name="ctl01_ObjectBrowser_ClientState" type="hidden" /> </div><div id="ctl01_ObjectBrowserDialog" style="display:none;"> <div id="ctl01_ObjectBrowserDialog_C" style="display:none;"> </div><input id="ctl01_ObjectBrowserDialog_ClientState" name="ctl01_ObjectBrowserDialog_ClientState" type="hidden" /> </div><div id="ctl01_WindowManager1_alerttemplate" style="display:none;"> <div class="rwDialogPopup radalert"> <div class="rwDialogText"> {1} </div> <div> <a onclick="$find('{0}').close(true);" class="rwPopupButton" href="javascript:void(0);"> <span class="rwOuterSpan"> <span class="rwInnerSpan">##LOC[OK]##</span> </span> </a> </div> </div> </div><div 
id="ctl01_WindowManager1_prompttemplate" style="display:none;"> <div class="rwDialogPopup radprompt"> <div class="rwDialogText"> {1} </div> <div> <script type="text/javascript"> function RadWindowprompt_detectenter(id, ev, input) { if (!ev) ev = window.event; if (ev.keyCode == 13) { var but = input.parentNode.parentNode.getElementsByTagName("A")[0]; if (but) { if (but.click) but.click(); else if (but.onclick) { but.focus(); var click = but.onclick; but.onclick = null; if (click) click.call(but); } } return false; } else return true; } </script> <input title="Enter Value" onkeydown="return RadWindowprompt_detectenter('{0}', event, this);" type="text" class="rwDialogInput" value="{2}" /> </div> <div> <a onclick="$find('{0}').close(this.parentNode.parentNode.getElementsByTagName('input')[0].value);" class="rwPopupButton" href="javascript:void(0);" ><span class="rwOuterSpan"><span class="rwInnerSpan">##LOC[OK]##</span></span></a> <a onclick="$find('{0}').close(null);" class="rwPopupButton" href="javascript:void(0);"><span class="rwOuterSpan"><span class="rwInnerSpan">##LOC[Cancel]##</span></span></a> </div> </div> </div><div id="ctl01_WindowManager1_confirmtemplate" style="display:none;"> <div class="rwDialogPopup radconfirm"> <div class="rwDialogText"> {1} </div> <div> <a onclick="$find('{0}').close(true);" class="rwPopupButton" href="javascript:void(0);" ><span class="rwOuterSpan"><span class="rwInnerSpan">##LOC[OK]##</span></span></a> <a onclick="$find('{0}').close(false);" class="rwPopupButton" href="javascript:void(0);"><span class="rwOuterSpan"><span class="rwInnerSpan">##LOC[Cancel]##</span></span></a> </div> </div> </div><input id="ctl01_WindowManager1_ClientState" name="ctl01_WindowManager1_ClientState" type="hidden" /> </div> <script type="text/javascript"> //<![CDATA[ var gCartCount; var cartDiv = $get("CartItemCount"); if (cartDiv != null){ jQuery.ajax({ type: "POST", url: gWebRoot + "/WebMethodUtilities.aspx/GetCartItemCount", data: "{}", contentType: "application/json; charset=utf-8", dataType: 'json', success: function(result) { if (result.d != '' && result.d != null) { gCartCount = result.d; if (gCartCount != null) { cartDiv.innerHTML = gCartCount; } } }, async: true }); } function CheckForChildren() { var contentRecordPageButtonPanelHasChildren = false; var contentRecordPageButtonPanel = jQuery('div.ContentRecordPageButtonPanel'); for (var i = 0, max = contentRecordPageButtonPanel.length; i < max; i++) { if (contentRecordPageButtonPanel[i].children.length > 0) { contentRecordPageButtonPanelHasChildren = true; break; } } if (!contentRecordPageButtonPanelHasChildren) { jQuery("Body").addClass("TemplateAreaEasyEditOn"); } } if (gIsEasyEditEnabled) CheckForChildren(); //]]> </script> <div class="aspNetHidden"> <input type="hidden" name="__VIEWSTATEGENERATOR" id="__VIEWSTATEGENERATOR" value="A03BAE16" /> </div> <script type="text/javascript"> //<![CDATA[ if(typeof(window['ctl01_TemplateBody_ContentPage1_contentitemdisplaybasejsmanager'])==='undefined') { window['ctl01_TemplateBody_ContentPage1_contentitemdisplaybasejsmanager']=new Asi_WebRoot_AsiCommon_ContentManagement_DownloadDocument(); }if(typeof(window['ctl01_TemplateBody_ContentPage1_contentitemdisplaybasejsmanager'])!=='undefined') { window['ctl01_TemplateBody_ContentPage1_contentitemdisplaybasejsmanager'].OnLoad('#ctl01_TemplateBody_ContentPage1_downloadButton','#ctl01_TemplateBody_ContentPage1_HiddenDownloadPathField'); }if(typeof(window['ctl01_TemplateBody_ContentPage2_contentitemdisplaybasejsmanager'])==='undefined') { 
window['ctl01_TemplateBody_ContentPage2_contentitemdisplaybasejsmanager']=new Asi_WebRoot_AsiCommon_ContentManagement_DownloadDocument(); }if(typeof(window['ctl01_TemplateBody_ContentPage2_contentitemdisplaybasejsmanager'])!=='undefined') { window['ctl01_TemplateBody_ContentPage2_contentitemdisplaybasejsmanager'].OnLoad('#ctl01_TemplateBody_ContentPage2_downloadButton','#ctl01_TemplateBody_ContentPage2_HiddenDownloadPathField'); }if(typeof(window['ctl01_TemplateBody_WebPartManager1_gwpciCornerArt_ciCornerArt_contentitemdisplaybasejsmanager'])==='undefined') { window['ctl01_TemplateBody_WebPartManager1_gwpciCornerArt_ciCornerArt_contentitemdisplaybasejsmanager']=new Asi_WebRoot_AsiCommon_ContentManagement_DownloadDocument(); }if(typeof(window['ctl01_TemplateBody_WebPartManager1_gwpciCornerArt_ciCornerArt_contentitemdisplaybasejsmanager'])!=='undefined') { window['ctl01_TemplateBody_WebPartManager1_gwpciCornerArt_ciCornerArt_contentitemdisplaybasejsmanager'].OnLoad('#ctl01_TemplateBody_WebPartManager1_gwpciCornerArt_ciCornerArt_downloadButton','#ctl01_TemplateBody_WebPartManager1_gwpciCornerArt_ciCornerArt_HiddenDownloadPathField'); }__Document_Head_Init('https://www.imaging.org/NoCookies.html', '', false);window.__TsmHiddenField = $get('ctl01_ScriptManager1_TSM');NavigationList_NavControlId = '_rptWrapper';NavigationList_Init();var ctl01_ciNewUtilityNavigationCommon2_ctl05_SearchTermsProperties = new SimpleSearchFieldProperties(); ctl01_ciNewUtilityNavigationCommon2_ctl05_SearchTermsProperties.WatermarkClass = 'Watermarked'; ctl01_ciNewUtilityNavigationCommon2_ctl05_SearchTermsProperties.WatermarkText = 'Keyword search'; ctl01_ciNewUtilityNavigationCommon2_ctl05_SearchTermsProperties.SearchTarget = 'https://www.imaging.org/Search'; var ctl01_ciNewUtilityNavigationCommon2_ctl08_SearchTermsProperties = new SimpleSearchFieldProperties(); ctl01_ciNewUtilityNavigationCommon2_ctl08_SearchTermsProperties.WatermarkClass = 'Watermarked'; ctl01_ciNewUtilityNavigationCommon2_ctl08_SearchTermsProperties.WatermarkText = 'Keyword search'; ctl01_ciNewUtilityNavigationCommon2_ctl08_SearchTermsProperties.SearchTarget = 'https://www.imaging.org/Search'; NavigationList_NavControlId = 'ctl01_ciPrimaryNavigation_NavControl_NavMenu';NavigationList_Init();PageNavR_NavMenuClientID = 'ctl01_ciPrimaryNavigation_NavControl_NavMenu';var __wpmExportWarning='This Web Part Page has been personalized. As a result, one or more Web Part properties may contain confidential information. Make sure the properties contain information that is safe for others to read. After exporting this Web Part, view properties in the Web Part description file (.WebPart) by using a text editor such as Microsoft Notepad.';var __wpmCloseProviderWarning='You are about to close this Web Part. It is currently providing data to other Web Parts, and these connections will be deleted if this Web Part is closed. To close this Web Part, click OK. To keep this Web Part, click Cancel.';var __wpmDeleteWarning='You are about to permanently delete this Web Part. Are you sure you want to do this? To delete this Web Part, click OK. 
To keep this Web Part, click Cancel.';__wpm = new WebPartManager(); __wpm.overlayContainerElement = document.getElementById('ctl01_TemplateBody_WebPartManager1___Drag'); __wpm.personalizationScopeShared = false; var zoneElement; var zoneObject; zoneElement = document.getElementById('ctl01_TemplateBody_ContentPage1_WebPartZone1_Page1');if (zoneElement != null) {zoneObject = __wpm.AddZone(zoneElement, 'ctl01$TemplateBody$ContentPage1$WebPartZone1_Page1', true, false, 'Blue'); zoneObject.AddWebPart(document.getElementById('WebPart_gwpciCornerArt'), document.getElementById('WebPartTitle_gwpciCornerArt'), false); zoneObject.AddWebPart(document.getElementById('WebPart_gwpciSponsors_8f085c685acb4e879ddb7f29f28731f5'), document.getElementById('WebPartTitle_gwpciSponsors_8f085c685acb4e879ddb7f29f28731f5'), false); }zoneElement = document.getElementById('ctl01_TemplateBody_ContentPage2_WebPartZone2_Page1');if (zoneElement != null) {zoneObject = __wpm.AddZone(zoneElement, 'ctl01$TemplateBody$ContentPage2$WebPartZone2_Page1', true, false, 'Blue'); zoneObject.AddWebPart(document.getElementById('WebPart_gwpciConfCCO'), document.getElementById('WebPartTitle_gwpciConfCCO'), false); }if(typeof(window['ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_jsmanager'])=='undefined') { window['ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_jsmanager']=new Asi_Web_iParts_ContentCollectionOrganizer_ContentCollectionOrganizerDisplay('ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_radPage', 'False'); }Sys.Application.add_init(function() { $create(Telerik.Web.UI.RadMenu, {"_childListElementCssClass":"rmRootGroup rmToggleHandles rmHorizontal","_skin":"NaturalHeritageSites","attributes":{"Translate":"Yes","PerspectiveId":"80409b89-ae6d-45a9-a9d4-96d522ff2047","NavigationArea":"1","MaxDataBindDepth":"3"},"autoScrollMinimumWidth":100,"clientStateFieldID":"ctl01_ciPrimaryNavigation_NavControl_NavMenu_ClientState","collapseAnimation":"{\"duration\":450}","defaultGroupSettings":"{\"flow\":0,\"expandDirection\":2,\"offsetX\":0}","expandAnimation":"{\"duration\":450}","itemData":[],"showToggleHandle":true}, {"itemClicking":PageNavR_OnClientItemClicking,"itemClosed":PageNavR_OnItemClosed,"itemOpened":PageNavR_OnItemOpened,"load":PageNavR_OnClientLoadHandler}, null, $get("ctl01_ciPrimaryNavigation_NavControl_NavMenu")); }); Sys.Application.add_init(function() { $create(Telerik.Web.UI.RadTabStrip, {"_autoPostBack":true,"_postBackOnClick":true,"_postBackReference":"__doPostBack(\u0027ctl01$TemplateBody$WebPartManager1$gwpciConfCCO$ciConfCCO$radTab_Top\u0027,\u0027arguments\u0027)","_scrollButtonsPosition":1,"_selectedIndex":1,"_skin":"MetroTouch","causesValidation":false,"clientStateFieldID":"ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_radTab_Top_ClientState","enableAriaSupport":true,"multiPageID":"ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_radPage","selectedIndexes":["1"],"tabData":[{"value":"1","_implPageViewID":"ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_Page_1","attributes":{"translate":"yes"}},{"value":"2","_implPageViewID":"ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_Page_2","attributes":{"translate":"yes"}},{"value":"3","_implPageViewID":"ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_Page_3","attributes":{"translate":"yes"}},{"value":"4","_implPageViewID":"ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_Page_4","attributes":{"translate":"yes"}}]}, null, null, 
$get("ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_radTab_Top")); }); Sys.Application.add_init(function() { $create(Telerik.Web.UI.RadMultiPage, {"clientStateFieldID":"ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_radPage_ClientState","pageViewData":[{"id":"ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_Page_1"},{"id":"ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_Page_2"},{"id":"ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_Page_3"},{"id":"ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_Page_4"}],"selectedIndex":1}, null, null, $get("ctl01_TemplateBody_WebPartManager1_gwpciConfCCO_ciConfCCO_radPage")); }); Sys.Application.add_init(function() { $create(Telerik.Web.UI.RadAjaxManager, {"_updatePanels":"","ajaxSettings":[],"clientEvents":{OnRequestStart:"",OnResponseEnd:""},"defaultLoadingPanelID":"AjaxStatusLoadingPanel","enableAJAX":true,"enableHistory":false,"links":[],"styles":[],"uniqueID":"ctl01$RadAjaxManager1","updatePanelsRenderMode":0}, null, null, $get("ctl01_RadAjaxManager1")); }); Sys.Application.add_init(function() { $create(Telerik.Web.UI.RadWindow, {"_dockMode":false,"behaviors":117,"clientStateFieldID":"ctl01_GenericWindow_ClientState","enableAriaSupport":true,"formID":"aspnetForm","height":"550px","iconUrl":"","localization":"{\"Close\":\"Close\",\"Maximize\":\"Maximize\",\"Minimize\":\"Minimize\",\"Reload\":\"Reload\",\"PinOn\":\"PinOn\",\"PinOff\":\"PinOff\",\"Restore\":\"Restore\",\"OK\":\"OK\",\"Cancel\":\"Cancel\",\"Yes\":\"Yes\",\"No\":\"No\"}","minimizeIconUrl":"","modal":true,"name":"GenericWindow","shortcuts":"[[\u0027close\u0027,\u0027Esc\u0027]]","showContentDuringLoad":false,"skin":"MetroTouch","visibleStatusbar":false,"width":"800px"}, null, null, $get("ctl01_GenericWindow")); }); Sys.Application.add_init(function() { $create(Telerik.Web.UI.RadWindow, {"_dockMode":false,"behaviors":117,"clientStateFieldID":"ctl01_ObjectBrowser_ClientState","enableAriaSupport":true,"formID":"aspnetForm","height":"550px","iconUrl":"","localization":"{\"Close\":\"Close\",\"Maximize\":\"Maximize\",\"Minimize\":\"Minimize\",\"Reload\":\"Reload\",\"PinOn\":\"PinOn\",\"PinOff\":\"PinOff\",\"Restore\":\"Restore\",\"OK\":\"OK\",\"Cancel\":\"Cancel\",\"Yes\":\"Yes\",\"No\":\"No\"}","minimizeIconUrl":"","modal":true,"name":"ObjectBrowser","shortcuts":"[[\u0027close\u0027,\u0027Esc\u0027]]","showContentDuringLoad":false,"skin":"MetroTouch","visibleStatusbar":false,"width":"760px"}, null, null, $get("ctl01_ObjectBrowser")); }); Sys.Application.add_init(function() { $create(Telerik.Web.UI.RadWindow, {"_dockMode":false,"behaviors":117,"clientStateFieldID":"ctl01_ObjectBrowserDialog_ClientState","enableAriaSupport":true,"formID":"aspnetForm","height":"400px","iconUrl":"","localization":"{\"Close\":\"Close\",\"Maximize\":\"Maximize\",\"Minimize\":\"Minimize\",\"Reload\":\"Reload\",\"PinOn\":\"PinOn\",\"PinOff\":\"PinOff\",\"Restore\":\"Restore\",\"OK\":\"OK\",\"Cancel\":\"Cancel\",\"Yes\":\"Yes\",\"No\":\"No\"}","minimizeIconUrl":"","modal":true,"name":"ObjectBrowserDialog","shortcuts":"[[\u0027close\u0027,\u0027Esc\u0027]]","showContentDuringLoad":false,"skin":"MetroTouch","visibleStatusbar":false,"width":"600px"}, null, null, $get("ctl01_ObjectBrowserDialog")); }); Sys.Application.add_init(function() { $create(Telerik.Web.UI.RadWindowManager, 
{"behaviors":117,"clientStateFieldID":"ctl01_WindowManager1_ClientState","enableAriaSupport":true,"formID":"aspnetForm","iconUrl":"","localization":"{\"Close\":\"Close\",\"Maximize\":\"Maximize\",\"Minimize\":\"Minimize\",\"Reload\":\"Reload\",\"PinOn\":\"PinOn\",\"PinOff\":\"PinOff\",\"Restore\":\"Restore\",\"OK\":\"OK\",\"Cancel\":\"Cancel\",\"Yes\":\"Yes\",\"No\":\"No\"}","minimizeIconUrl":"","name":"WindowManager1","shortcuts":"[[\u0027close\u0027,\u0027Esc\u0027]]","skin":"MetroTouch","windowControls":"['ctl01_GenericWindow','ctl01_ObjectBrowser','ctl01_ObjectBrowserDialog']"}, null, {"child":"ctl01_GenericWindow"}, $get("ctl01_WindowManager1")); }); //]]> </script> </form> <div id="fb-root"></div> </body> </html>

Pages: 1 2 3 4 5 6 7 8 9 10