<!-- Scrape artifacts ("CINXE.COM" and a duplicated page title) preceded the doctype; preserved here as a comment so the doctype stays first and the page is not forced into quirks mode. -->
<!DOCTYPE html> <html lang="en"> <head> <meta charset="utf-8"/> <meta name="viewport" content="width=device-width, initial-scale=1"/> <!-- new favicon config and versions by realfavicongenerator.net --> <link rel="apple-touch-icon" sizes="180x180" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/apple-touch-icon.png"> <link rel="icon" type="image/png" sizes="32x32" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-32x32.png"> <link rel="icon" type="image/png" sizes="16x16" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon-16x16.png"> <link rel="manifest" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/site.webmanifest"> <link rel="mask-icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/safari-pinned-tab.svg" color="#b31b1b"> <link rel="shortcut icon" href="https://static.arxiv.org/static/base/1.0.0a5/images/icons/favicon.ico"> <meta name="msapplication-TileColor" content="#b31b1b"> <meta name="msapplication-config" content="images/icons/browserconfig.xml"> <meta name="theme-color" content="#b31b1b"> <!-- end favicon config --> <title>Search | arXiv e-print repository</title> <script defer src="https://static.arxiv.org/static/base/1.0.0a5/fontawesome-free-5.11.2-web/js/all.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/base/1.0.0a5/css/arxivstyle.css" /> <script type="text/x-mathjax-config"> MathJax.Hub.Config({ messageStyle: "none", extensions: ["tex2jax.js"], jax: ["input/TeX", "output/HTML-CSS"], tex2jax: { inlineMath: [ ['$','$'], ["\\(","\\)"] ], displayMath: [ ['$$','$$'], ["\\[","\\]"] ], processEscapes: true, ignoreClass: '.*', processClass: 'mathjax.*' }, TeX: { extensions: ["AMSmath.js", "AMSsymbols.js", "noErrors.js"], noErrors: { inlineDelimiters: ["$","$"], multiLine: false, style: { "font-size": "normal", "border": "" } } }, "HTML-CSS": { availableFonts: ["TeX"] } }); </script> <script 
src='//static.arxiv.org/MathJax-2.7.3/MathJax.js'></script> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/notification.js"></script> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/bulma-tooltip.min.css" /> <link rel="stylesheet" href="https://static.arxiv.org/static/search/0.5.6/css/search.css" /> <script src="https://code.jquery.com/jquery-3.2.1.slim.min.js" integrity="sha256-k2WSCIexGzOj3Euiig+TlR8gA0EmPjuc79OEeY5L45g=" crossorigin="anonymous"></script> <script src="https://static.arxiv.org/static/search/0.5.6/js/fieldset.js"></script> <style> radio#cf-customfield_11400 { display: none; } </style> </head> <body> <header><a href="#main-container" class="is-sr-only">Skip to main content</a> <!-- contains Cornell logo and sponsor statement --> <div class="attribution level is-marginless" role="banner"> <div class="level-left"> <a class="level-item" href="https://cornell.edu/"><img src="https://static.arxiv.org/static/base/1.0.0a5/images/cornell-reduced-white-SMALL.svg" alt="Cornell University" width="200" aria-label="logo" /></a> </div> <div class="level-right is-marginless"><p class="sponsors level-item is-marginless"><span id="support-ack-url">We gratefully acknowledge support from<br /> the Simons Foundation, <a href="https://info.arxiv.org/about/ourmembers.html">member institutions</a>, and all contributors. 
<a href="https://info.arxiv.org/about/donate.html">Donate</a></span></p></div> </div> <!-- contains arXiv identity and search bar --> <div class="identity level is-marginless"> <div class="level-left"> <div class="level-item"> <a class="arxiv" href="https://arxiv.org/" aria-label="arxiv-logo"> <img src="https://static.arxiv.org/static/base/1.0.0a5/images/arxiv-logo-one-color-white.svg" aria-label="logo" alt="arxiv logo" width="85" style="width:85px;"/> </a> </div> </div> <div class="search-block level-right"> <form class="level-item mini-search" method="GET" action="https://arxiv.org/search"> <div class="field has-addons"> <div class="control"> <input class="input is-small" type="text" name="query" placeholder="Search..." aria-label="Search term or terms" /> <p class="help"><a href="https://info.arxiv.org/help">Help</a> | <a href="https://arxiv.org/search/advanced">Advanced Search</a></p> </div> <div class="control"> <div class="select is-small"> <select name="searchtype" aria-label="Field to search"> <option value="all" selected="selected">All fields</option> <option value="title">Title</option> <option value="author">Author</option> <option value="abstract">Abstract</option> <option value="comments">Comments</option> <option value="journal_ref">Journal reference</option> <option value="acm_class">ACM classification</option> <option value="msc_class">MSC classification</option> <option value="report_num">Report number</option> <option value="paper_id">arXiv identifier</option> <option value="doi">DOI</option> <option value="orcid">ORCID</option> <option value="author_id">arXiv author ID</option> <option value="help">Help pages</option> <option value="full_text">Full text</option> </select> </div> </div> <input type="hidden" name="source" value="header"> <button class="button is-small is-cul-darker">Search</button> </div> </form> </div> </div> <!-- closes identity --> <div class="container"> <div class="user-tools is-size-7 has-text-right has-text-weight-bold" 
role="navigation" aria-label="User menu"> <a href="https://arxiv.org/login">Login</a> </div> </div> </header> <main class="container" id="main-container"> <div class="level is-marginless"> <div class="level-left"> <h1 class="title is-clearfix"> Showing 1–50 of 69 results for author: <span class="mathjax">Hassan, H</span> </h1> </div> <div class="level-right is-hidden-mobile"> <!-- feedback for mobile is moved to footer --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a> </span> </div> </div> <div class="content"> <form method="GET" action="/search/cs" aria-role="search"> Searching in archive <strong>cs</strong>. <a href="/search/?searchtype=author&query=Hassan%2C+H">Search in all archives.</a> <div class="field has-addons-tablet"> <div class="control is-expanded"> <label for="query" class="hidden-label">Search term or terms</label> <input class="input is-medium" id="query" name="query" placeholder="Search term..." 
type="text" value="Hassan, H"> </div> <div class="select control is-medium"> <label class="is-hidden" for="searchtype">Field</label> <select class="is-medium" id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> </div> <div class="control"> <button class="button is-link is-medium">Search</button> </div> </div> <div class="field"> <div class="control is-size-7"> <label class="radio"> <input checked id="abstracts-0" name="abstracts" type="radio" value="show"> Show abstracts </label> <label class="radio"> <input id="abstracts-1" name="abstracts" type="radio" value="hide"> Hide abstracts </label> </div> </div> <div class="is-clearfix" style="height: 2.5em"> <div class="is-pulled-right"> <a href="/search/advanced?terms-0-term=Hassan%2C+H&terms-0-field=author&size=50&order=-announced_date_first">Advanced Search</a> </div> </div> <input type="hidden" name="order" value="-announced_date_first"> <input type="hidden" name="size" value="50"> </form> <div class="level breathe-horizontal"> <div class="level-left"> <form method="GET" action="/search/"> <div style="display: none;"> <select id="searchtype" name="searchtype"><option value="all">All fields</option><option value="title">Title</option><option selected value="author">Author(s)</option><option 
value="abstract">Abstract</option><option value="comments">Comments</option><option value="journal_ref">Journal reference</option><option value="acm_class">ACM classification</option><option value="msc_class">MSC classification</option><option value="report_num">Report number</option><option value="paper_id">arXiv identifier</option><option value="doi">DOI</option><option value="orcid">ORCID</option><option value="license">License (URI)</option><option value="author_id">arXiv author ID</option><option value="help">Help pages</option><option value="full_text">Full text</option></select> <input id="query" name="query" type="text" value="Hassan, H"> <ul id="abstracts"><li><input checked id="abstracts-0" name="abstracts" type="radio" value="show"> <label for="abstracts-0">Show abstracts</label></li><li><input id="abstracts-1" name="abstracts" type="radio" value="hide"> <label for="abstracts-1">Hide abstracts</label></li></ul> </div> <div class="box field is-grouped is-grouped-multiline level-item"> <div class="control"> <span class="select is-small"> <select id="size" name="size"><option value="25">25</option><option selected value="50">50</option><option value="100">100</option><option value="200">200</option></select> </span> <label for="size">results per page</label>. 
</div> <div class="control"> <label for="order">Sort results by</label> <span class="select is-small"> <select id="order" name="order"><option selected value="-announced_date_first">Announcement date (newest first)</option><option value="announced_date_first">Announcement date (oldest first)</option><option value="-submitted_date">Submission date (newest first)</option><option value="submitted_date">Submission date (oldest first)</option><option value="">Relevance</option></select> </span> </div> <div class="control"> <button class="button is-small is-link">Go</button> </div> </div> </form> </div> </div> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&query=Hassan%2C+H&start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&query=Hassan%2C+H&start=0" class="pagination-link is-current" aria-label="Goto page 1">1 </a> </li> <li> <a href="/search/?searchtype=author&query=Hassan%2C+H&start=50" class="pagination-link " aria-label="Page 2" aria-current="page">2 </a> </li> </ul> </nav> <ol class="breathe-horizontal" start="1"> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2407.00541">arXiv:2407.00541</a> <span> [<a href="https://arxiv.org/pdf/2407.00541">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Information Retrieval">cs.IR</span> </div> </div> <p class="title is-5 mathjax"> Answering real-world clinical questions using large language model based systems </p> <p class="authors"> 
<span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Low%2C+Y+S">Yen Sia Low</a>, <a href="/search/cs?searchtype=author&query=Jackson%2C+M+L">Michael L. Jackson</a>, <a href="/search/cs?searchtype=author&query=Hyde%2C+R+J">Rebecca J. Hyde</a>, <a href="/search/cs?searchtype=author&query=Brown%2C+R+E">Robert E. Brown</a>, <a href="/search/cs?searchtype=author&query=Sanghavi%2C+N+M">Neil M. Sanghavi</a>, <a href="/search/cs?searchtype=author&query=Baldwin%2C+J+D">Julian D. Baldwin</a>, <a href="/search/cs?searchtype=author&query=Pike%2C+C+W">C. William Pike</a>, <a href="/search/cs?searchtype=author&query=Muralidharan%2C+J">Jananee Muralidharan</a>, <a href="/search/cs?searchtype=author&query=Hui%2C+G">Gavin Hui</a>, <a href="/search/cs?searchtype=author&query=Alexander%2C+N">Natasha Alexander</a>, <a href="/search/cs?searchtype=author&query=Hassan%2C+H">Hadeel Hassan</a>, <a href="/search/cs?searchtype=author&query=Nene%2C+R+V">Rahul V. Nene</a>, <a href="/search/cs?searchtype=author&query=Pike%2C+M">Morgan Pike</a>, <a href="/search/cs?searchtype=author&query=Pokrzywa%2C+C+J">Courtney J. Pokrzywa</a>, <a href="/search/cs?searchtype=author&query=Vedak%2C+S">Shivam Vedak</a>, <a href="/search/cs?searchtype=author&query=Yan%2C+A+P">Adam Paul Yan</a>, <a href="/search/cs?searchtype=author&query=Yao%2C+D">Dong-han Yao</a>, <a href="/search/cs?searchtype=author&query=Zipursky%2C+A+R">Amy R. Zipursky</a>, <a href="/search/cs?searchtype=author&query=Dinh%2C+C">Christina Dinh</a>, <a href="/search/cs?searchtype=author&query=Ballentine%2C+P">Philip Ballentine</a>, <a href="/search/cs?searchtype=author&query=Derieg%2C+D+C">Dan C. Derieg</a>, <a href="/search/cs?searchtype=author&query=Polony%2C+V">Vladimir Polony</a>, <a href="/search/cs?searchtype=author&query=Chawdry%2C+R+N">Rehan N. Chawdry</a>, <a href="/search/cs?searchtype=author&query=Davies%2C+J">Jordan Davies</a>, <a href="/search/cs?searchtype=author&query=Hyde%2C+B+B">Brigham B. 
Hyde</a> , et al. (2 additional authors not shown) </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2407.00541v1-abstract-short" style="display: inline;"> Evidence to guide healthcare decisions is often limited by a lack of relevant and trustworthy literature as well as difficulty in contextualizing existing research for a specific patient. Large language models (LLMs) could potentially address both challenges by either summarizing published literature or generating new studies based on real-world data (RWD). We evaluated the ability of five LLM-bas… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.00541v1-abstract-full').style.display = 'inline'; document.getElementById('2407.00541v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2407.00541v1-abstract-full" style="display: none;"> Evidence to guide healthcare decisions is often limited by a lack of relevant and trustworthy literature as well as difficulty in contextualizing existing research for a specific patient. Large language models (LLMs) could potentially address both challenges by either summarizing published literature or generating new studies based on real-world data (RWD). We evaluated the ability of five LLM-based systems in answering 50 clinical questions and had nine independent physicians review the responses for relevance, reliability, and actionability. As it stands, general-purpose LLMs (ChatGPT-4, Claude 3 Opus, Gemini Pro 1.5) rarely produced answers that were deemed relevant and evidence-based (2% - 10%). In contrast, retrieval augmented generation (RAG)-based and agentic LLM systems produced relevant and evidence-based answers for 24% (OpenEvidence) to 58% (ChatRWD) of questions. 
Only the agentic ChatRWD was able to answer novel questions compared to other LLMs (65% vs. 0-9%). These results suggest that while general-purpose LLMs should not be used as-is, a purpose-built system for evidence summarization based on RAG and one for generating novel evidence working synergistically would improve availability of pertinent evidence for patient care. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2407.00541v1-abstract-full').style.display = 'none'; document.getElementById('2407.00541v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2024. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">28 pages (2 figures, 3 tables) inclusive of 8 pages of supplemental materials (4 supplemental figures and 4 supplemental tables)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2405.19332">arXiv:2405.19332</a> <span> [<a href="https://arxiv.org/pdf/2405.19332">pdf</a>, <a href="https://arxiv.org/format/2405.19332">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Self-Exploring Language Models: Active Preference Elicitation for Online Alignment </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Zhang%2C+S">Shenao Zhang</a>, <a href="/search/cs?searchtype=author&query=Yu%2C+D">Donghan Yu</a>, <a 
href="/search/cs?searchtype=author&query=Sharma%2C+H">Hiteshi Sharma</a>, <a href="/search/cs?searchtype=author&query=Zhong%2C+H">Han Zhong</a>, <a href="/search/cs?searchtype=author&query=Liu%2C+Z">Zhihan Liu</a>, <a href="/search/cs?searchtype=author&query=Yang%2C+Z">Ziyi Yang</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+S">Shuohang Wang</a>, <a href="/search/cs?searchtype=author&query=Hassan%2C+H">Hany Hassan</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+Z">Zhaoran Wang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2405.19332v3-abstract-short" style="display: inline;"> Preference optimization, particularly through Reinforcement Learning from Human Feedback (RLHF), has achieved significant success in aligning Large Language Models (LLMs) to adhere to human intentions. Unlike offline alignment with a fixed dataset, online feedback collection from humans or AI on model generations typically leads to more capable reward models and better-aligned LLMs through an iter… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.19332v3-abstract-full').style.display = 'inline'; document.getElementById('2405.19332v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2405.19332v3-abstract-full" style="display: none;"> Preference optimization, particularly through Reinforcement Learning from Human Feedback (RLHF), has achieved significant success in aligning Large Language Models (LLMs) to adhere to human intentions. Unlike offline alignment with a fixed dataset, online feedback collection from humans or AI on model generations typically leads to more capable reward models and better-aligned LLMs through an iterative process. 
However, achieving a globally accurate reward model requires systematic exploration to generate diverse responses that span the vast space of natural language. Random sampling from standard reward-maximizing LLMs alone is insufficient to fulfill this requirement. To address this issue, we propose a bilevel objective optimistically biased towards potentially high-reward responses to actively explore out-of-distribution regions. By solving the inner-level problem with the reparameterized reward function, the resulting algorithm, named Self-Exploring Language Models (SELM), eliminates the need for a separate RM and iteratively updates the LLM with a straightforward objective. Compared to Direct Preference Optimization (DPO), the SELM objective reduces indiscriminate favor of unseen extrapolations and enhances exploration efficiency. Our experimental results demonstrate that when fine-tuned on Zephyr-7B-SFT and Llama-3-8B-Instruct models, SELM significantly boosts the performance on instruction-following benchmarks such as MT-Bench and AlpacaEval 2.0, as well as various standard academic benchmarks in different settings. Our code and models are available at https://github.com/shenao-zhang/SELM. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2405.19332v3-abstract-full').style.display = 'none'; document.getElementById('2405.19332v3-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 November, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 29 May, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2024. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2402.15216">arXiv:2402.15216</a> <span> [<a href="https://arxiv.org/pdf/2402.15216">pdf</a>, <a href="https://arxiv.org/ps/2402.15216">ps</a>, <a href="https://arxiv.org/format/2402.15216">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Label-efficient Multi-organ Segmentation Method with Diffusion Model </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Huang%2C+Y">Yongzhi Huang</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+J">Jinxin Zhu</a>, <a href="/search/cs?searchtype=author&query=Hassan%2C+H">Haseeb Hassan</a>, <a href="/search/cs?searchtype=author&query=Su%2C+L">Liyilei Su</a>, <a href="/search/cs?searchtype=author&query=Li%2C+J">Jingyu Li</a>, <a href="/search/cs?searchtype=author&query=Huang%2C+B">Binding Huang</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2402.15216v1-abstract-short" style="display: inline;"> Accurate segmentation of multiple organs in Computed Tomography (CT) images plays a vital role in computer-aided diagnosis systems. Various supervised-learning approaches have been proposed recently. However, these methods heavily depend on a large amount of high-quality labeled data, which is expensive to obtain in practice. 
In this study, we present a label-efficient learning approach using a pr… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.15216v1-abstract-full').style.display = 'inline'; document.getElementById('2402.15216v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2402.15216v1-abstract-full" style="display: none;"> Accurate segmentation of multiple organs in Computed Tomography (CT) images plays a vital role in computer-aided diagnosis systems. Various supervised-learning approaches have been proposed recently. However, these methods heavily depend on a large amount of high-quality labeled data, which is expensive to obtain in practice. In this study, we present a label-efficient learning approach using a pre-trained diffusion model for multi-organ segmentation tasks in CT images. First, a denoising diffusion model was trained using unlabeled CT data, generating additional two-dimensional (2D) CT images. Then the pre-trained denoising diffusion network was transferred to the downstream multi-organ segmentation task, effectively creating a semi-supervised learning model that requires only a small amount of labeled data. Furthermore, linear classification and fine-tuning decoder strategies were employed to enhance the network's segmentation performance. Our generative model at 256x256 resolution achieves impressive performance in terms of Fréchet inception distance, spatial Fréchet inception distance, and F1-score, with values of 11.32, 46.93, and 73.1\%, respectively. These results affirm the diffusion model's ability to generate diverse and realistic 2D CT images. Additionally, our method achieves competitive multi-organ segmentation performance compared to state-of-the-art methods on the FLARE 2022 dataset, particularly in limited labeled data scenarios. 
Remarkably, even with only 1\% and 10\% labeled data, our method achieves Dice similarity coefficients (DSCs) of 71.56\% and 78.51\% after fine-tuning, respectively. The method achieves a DSC score of 51.81\% using just four labeled CT scans. These results demonstrate the efficacy of our approach in overcoming the limitations of supervised learning heavily reliant on large-scale labeled data. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2402.15216v1-abstract-full').style.display = 'none'; document.getElementById('2402.15216v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 23 February, 2024; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2024. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2312.05279">arXiv:2312.05279</a> <span> [<a href="https://arxiv.org/pdf/2312.05279">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> Quantitative perfusion maps using a novelty spatiotemporal convolutional neural network </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Cao%2C+A">Anbo Cao</a>, <a href="/search/cs?searchtype=author&query=Le%2C+P">Pin-Yu Le</a>, <a href="/search/cs?searchtype=author&query=Qie%2C+Z">Zhonghui Qie</a>, <a href="/search/cs?searchtype=author&query=Hassan%2C+H">Haseeb Hassan</a>, <a href="/search/cs?searchtype=author&query=Guo%2C+Y">Yingwei Guo</a>, <a href="/search/cs?searchtype=author&query=Zaman%2C+A">Asim Zaman</a>, <a 
href="/search/cs?searchtype=author&query=Lu%2C+J">Jiaxi Lu</a>, <a href="/search/cs?searchtype=author&query=Zeng%2C+X">Xueqiang Zeng</a>, <a href="/search/cs?searchtype=author&query=Yang%2C+H">Huihui Yang</a>, <a href="/search/cs?searchtype=author&query=Miao%2C+X">Xiaoqiang Miao</a>, <a href="/search/cs?searchtype=author&query=Han%2C+T">Taiyu Han</a>, <a href="/search/cs?searchtype=author&query=Huang%2C+G">Guangtao Huang</a>, <a href="/search/cs?searchtype=author&query=Kang%2C+Y">Yan Kang</a>, <a href="/search/cs?searchtype=author&query=Luo%2C+Y">Yu Luo</a>, <a href="/search/cs?searchtype=author&query=Guo%2C+J">Jia Guo</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2312.05279v1-abstract-short" style="display: inline;"> Dynamic susceptibility contrast magnetic resonance imaging (DSC-MRI) is widely used to evaluate acute ischemic stroke to distinguish salvageable tissue and infarct core. For this purpose, traditional methods employ deconvolution techniques, like singular value decomposition, which are known to be vulnerable to noise, potentially distorting the derived perfusion parameters. However, deep learning t… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.05279v1-abstract-full').style.display = 'inline'; document.getElementById('2312.05279v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2312.05279v1-abstract-full" style="display: none;"> Dynamic susceptibility contrast magnetic resonance imaging (DSC-MRI) is widely used to evaluate acute ischemic stroke to distinguish salvageable tissue and infarct core. For this purpose, traditional methods employ deconvolution techniques, like singular value decomposition, which are known to be vulnerable to noise, potentially distorting the derived perfusion parameters. 
However, deep learning technology could leverage it, which can accurately estimate clinical perfusion parameters compared to traditional clinical approaches. Therefore, this study presents a perfusion parameters estimation network that considers spatial and temporal information, the Spatiotemporal Network (ST-Net), for the first time. The proposed network comprises a designed physical loss function to enhance model performance further. The results indicate that the network can accurately estimate perfusion parameters, including cerebral blood volume (CBV), cerebral blood flow (CBF), and time to maximum of the residual function (Tmax). The structural similarity index (SSIM) mean values for CBV, CBF, and Tmax parameters were 0.952, 0.943, and 0.863, respectively. The DICE score for the hypo-perfused region reached 0.859, demonstrating high consistency. The proposed model also maintains time efficiency, closely approaching the performance of commercial gold-standard software. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2312.05279v1-abstract-full').style.display = 'none'; document.getElementById('2312.05279v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 December, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> December 2023. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2310.07252">arXiv:2310.07252</a> <span> [<a href="https://arxiv.org/pdf/2310.07252">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> A Comparative Study of Pre-trained CNNs and GRU-Based Attention for Image Caption Generation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Khan%2C+R">Rashid Khan</a>, <a href="/search/cs?searchtype=author&query=Huang%2C+B">Bingding Huang</a>, <a href="/search/cs?searchtype=author&query=Hassan%2C+H">Haseeb Hassan</a>, <a href="/search/cs?searchtype=author&query=Zaman%2C+A">Asim Zaman</a>, <a href="/search/cs?searchtype=author&query=Ye%2C+Z">Zhongfu Ye</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2310.07252v1-abstract-short" style="display: inline;"> Image captioning is a challenging task involving generating a textual description for an image using computer vision and natural language processing techniques. This paper proposes a deep neural framework for image caption generation using a GRU-based attention mechanism. 
Our approach employs multiple pre-trained convolutional neural networks as the encoder to extract features from the image and a… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.07252v1-abstract-full').style.display = 'inline'; document.getElementById('2310.07252v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2310.07252v1-abstract-full" style="display: none;"> Image captioning is a challenging task involving generating a textual description for an image using computer vision and natural language processing techniques. This paper proposes a deep neural framework for image caption generation using a GRU-based attention mechanism. Our approach employs multiple pre-trained convolutional neural networks as the encoder to extract features from the image and a GRU-based language model as the decoder to generate descriptive sentences. To improve performance, we integrate the Bahdanau attention model with the GRU decoder to enable learning to focus on specific image parts. We evaluate our approach using the MSCOCO and Flickr30k datasets and show that it achieves competitive scores compared to state-of-the-art methods. Our proposed framework can bridge the gap between computer vision and natural language and can be extended to specific domains. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2310.07252v1-abstract-full').style.display = 'none'; document.getElementById('2310.07252v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 11 October, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2023. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">15 pages, 10 figures, 5 tables. 2023 the 5th International Conference on Robotics and Computer Vision (ICRCV 2023). arXiv admin note: substantial text overlap with arXiv:2203.01594</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2307.08024">arXiv:2307.08024</a> <span> [<a href="https://arxiv.org/pdf/2307.08024">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Bayesian inference for data-efficient, explainable, and safe robotic motion planning: A review </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Zhou%2C+C">Chengmin Zhou</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+C">Chao Wang</a>, <a href="/search/cs?searchtype=author&query=Hassan%2C+H">Haseeb Hassan</a>, <a href="/search/cs?searchtype=author&query=Shah%2C+H">Himat Shah</a>, <a href="/search/cs?searchtype=author&query=Huang%2C+B">Bingding Huang</a>, <a href="/search/cs?searchtype=author&query=Fr%C3%A4nti%2C+P">Pasi Fränti</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2307.08024v1-abstract-short" style="display: inline;"> Bayesian inference has many advantages in robotic motion planning over four perspectives: The uncertainty quantification of the policy, safety (risk-aware) and optimum guarantees of robot motions, data-efficiency in training of reinforcement learning, and reducing the sim2real gap when the robot is applied to real-world tasks. 
However, the application of Bayesian inference in robotic motion planni… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2307.08024v1-abstract-full').style.display = 'inline'; document.getElementById('2307.08024v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2307.08024v1-abstract-full" style="display: none;"> Bayesian inference has many advantages in robotic motion planning over four perspectives: The uncertainty quantification of the policy, safety (risk-aware) and optimum guarantees of robot motions, data-efficiency in training of reinforcement learning, and reducing the sim2real gap when the robot is applied to real-world tasks. However, the application of Bayesian inference in robotic motion planning is lagging behind the comprehensive theory of Bayesian inference. Further, there are no comprehensive reviews to summarize the progress of Bayesian inference to give researchers a systematic understanding in robotic motion planning. This paper first provides the probabilistic theories of Bayesian inference which are the preliminary of Bayesian inference for complex cases. Second, the Bayesian estimation is given to estimate the posterior of policies or unknown functions which are used to compute the policy. Third, the classical model-based Bayesian RL and model-free Bayesian RL algorithms for robotic motion planning are summarized, while these algorithms in complex cases are also analyzed. Fourth, the analysis of Bayesian inference in inverse RL is given to infer the reward functions in a data-efficient manner. Fifth, we systematically present the hybridization of Bayesian inference and RL which is a promising direction to improve the convergence of RL for better motion planning. Sixth, given the Bayesian inference, we present the interpretable and safe robotic motion plannings which are the hot research topic recently. 
Finally, all algorithms reviewed in this paper are summarized analytically as the knowledge graphs, and the future of Bayesian inference for robotic motion planning is also discussed, to pave the way for data-efficient, explainable, and safe robotic motion planning strategies for practical applications. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2307.08024v1-abstract-full').style.display = 'none'; document.getElementById('2307.08024v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 July, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2307.06300">arXiv:2307.06300</a> <span> [<a href="https://arxiv.org/pdf/2307.06300">pdf</a>, <a href="https://arxiv.org/format/2307.06300">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> </div> </div> <p class="title is-5 mathjax"> A Comparative Analysis Between the Additive and the Multiplicative Extended Kalman Filter for Satellite Attitude Determination </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Hassan%2C+H+A">Hamza A. Hassan</a>, <a href="/search/cs?searchtype=author&query=Tolstrup%2C+W">William Tolstrup</a>, <a href="/search/cs?searchtype=author&query=Suriana%2C+J+P">Johanes P. Suriana</a>, <a href="/search/cs?searchtype=author&query=Kiziloklu%2C+I+D">Ibrahim D. 
Kiziloklu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2307.06300v2-abstract-short" style="display: inline;"> The general consensus is that the Multiplicative Extended Kalman Filter (MEKF) is superior to the Additive Extended Kalman Filter (AEKF) based on a wealth of theoretical evidence. This paper deals with a practical comparison between the two filters in simulation with the goal of verifying if the previous theoretical foundations are true. The AEKF and MEKF are two variants of the Extended Kalman Fi… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2307.06300v2-abstract-full').style.display = 'inline'; document.getElementById('2307.06300v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2307.06300v2-abstract-full" style="display: none;"> The general consensus is that the Multiplicative Extended Kalman Filter (MEKF) is superior to the Additive Extended Kalman Filter (AEKF) based on a wealth of theoretical evidence. This paper deals with a practical comparison between the two filters in simulation with the goal of verifying if the previous theoretical foundations are true. The AEKF and MEKF are two variants of the Extended Kalman Filter that differ in their approach to linearizing the system dynamics. The AEKF uses an additive correction term to update the state estimate, while the MEKF uses a multiplicative correction term. The two also differ in the state of which they use. The AEKF uses the quaternion as its state while the MEKF uses the Gibbs vector as its state. The results show that the MEKF consistently outperforms the AEKF in terms of estimation accuracy with lower uncertainty. 
The AEKF is more computationally efficient, but the difference is so low that it is almost negligible and it has no effect on a real-time application. Overall, the results suggest that the MEKF is a better choice for satellite attitude estimation due to its superior estimation accuracy and lower uncertainty, which agrees with the statements from previous work. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2307.06300v2-abstract-full').style.display = 'none'; document.getElementById('2307.06300v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 July, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 12 July, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2305.19835">arXiv:2305.19835</a> <span> [<a href="https://arxiv.org/pdf/2305.19835">pdf</a>, <a href="https://arxiv.org/ps/2305.19835">ps</a>, <a href="https://arxiv.org/format/2305.19835">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Deliberate then Generate: Enhanced Prompting Framework for Text Generation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Li%2C+B">Bei Li</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+R">Rui Wang</a>, <a href="/search/cs?searchtype=author&query=Guo%2C+J">Junliang Guo</a>, <a href="/search/cs?searchtype=author&query=Song%2C+K">Kaitao Song</a>, <a 
href="/search/cs?searchtype=author&query=Tan%2C+X">Xu Tan</a>, <a href="/search/cs?searchtype=author&query=Hassan%2C+H">Hany Hassan</a>, <a href="/search/cs?searchtype=author&query=Menezes%2C+A">Arul Menezes</a>, <a href="/search/cs?searchtype=author&query=Xiao%2C+T">Tong Xiao</a>, <a href="/search/cs?searchtype=author&query=Bian%2C+J">Jiang Bian</a>, <a href="/search/cs?searchtype=author&query=Zhu%2C+J">JingBo Zhu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2305.19835v1-abstract-short" style="display: inline;"> Large language models (LLMs) have shown remarkable success across a wide range of natural language generation tasks, where proper prompt designs make great impacts. While existing prompting methods are normally restricted to providing correct information, in this paper, we encourage the model to deliberate by proposing a novel Deliberate then Generate (DTG) prompting framework, which consists of e… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.19835v1-abstract-full').style.display = 'inline'; document.getElementById('2305.19835v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2305.19835v1-abstract-full" style="display: none;"> Large language models (LLMs) have shown remarkable success across a wide range of natural language generation tasks, where proper prompt designs make great impacts. While existing prompting methods are normally restricted to providing correct information, in this paper, we encourage the model to deliberate by proposing a novel Deliberate then Generate (DTG) prompting framework, which consists of error detection instructions and candidates that may contain errors. DTG is a simple yet effective technique that can be applied to various text generation tasks with minimal modifications. 
We conduct extensive experiments on 20+ datasets across 7 text generation tasks, including summarization, translation, dialogue, and more. We show that DTG consistently outperforms existing prompting methods and achieves state-of-the-art performance on multiple text generation tasks. We also provide in-depth analyses to reveal the underlying mechanisms of DTG, which may inspire future research on prompting for LLMs. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2305.19835v1-abstract-full').style.display = 'none'; document.getElementById('2305.19835v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 31 May, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2023. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2303.07445">arXiv:2303.07445</a> <span> [<a href="https://arxiv.org/pdf/2303.07445">pdf</a>, <a href="https://arxiv.org/format/2303.07445">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Hardware Architecture">cs.AR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> </div> </div> <p class="title is-5 mathjax"> Improving DRAM Performance, Reliability, and Security by Rigorously Understanding Intrinsic DRAM Operation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Hassan%2C+H">Hasan Hassan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2303.07445v1-abstract-short" style="display: inline;"> DRAM is the primary technology used for main memory in modern 
systems. Unfortunately, as DRAM scales down to smaller technology nodes, it faces key challenges in both data integrity and latency, which strongly affect overall system reliability, security, and performance. To develop reliable, secure, and high-performance DRAM-based main memory for future systems, it is critical to rigorously charac… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.07445v1-abstract-full').style.display = 'inline'; document.getElementById('2303.07445v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2303.07445v1-abstract-full" style="display: none;"> DRAM is the primary technology used for main memory in modern systems. Unfortunately, as DRAM scales down to smaller technology nodes, it faces key challenges in both data integrity and latency, which strongly affect overall system reliability, security, and performance. To develop reliable, secure, and high-performance DRAM-based main memory for future systems, it is critical to rigorously characterize, analyze, and understand various aspects (e.g., reliability, retention, latency, RowHammer vulnerability) of existing DRAM chips and their architecture. The goal of this dissertation is to 1) develop techniques and infrastructures to enable such rigorous characterization, analysis, and understanding, and 2) enable new mechanisms to improve DRAM performance, reliability, and security based on the developed understanding. 
To this end, in this dissertation, we 1) design, implement, and prototype a new practical-to-use and flexible FPGA-based DRAM characterization infrastructure (called SoftMC), 2) use the DRAM characterization infrastructure to develop a new experimental methodology (called U-TRR) to uncover the operation of existing proprietary in-DRAM RowHammer protection mechanisms and craft new RowHammer access patterns to efficiently circumvent these RowHammer protection mechanisms, 3) propose a new DRAM architecture, called SelfManaging DRAM, for enabling autonomous and efficient in-DRAM maintenance operations that enable not only better performance, efficiency, and reliability but also faster and easier adoption of changes to DRAM chips, and 4) propose a versatile DRAM substrate, called the Copy-Row (CROW) substrate, that enables new mechanisms for improving DRAM performance, energy consumption, and reliability. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2303.07445v1-abstract-full').style.display = 'none'; document.getElementById('2303.07445v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 March, 2023; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2023. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Doctoral thesis</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2211.05838">arXiv:2211.05838</a> <span> [<a href="https://arxiv.org/pdf/2211.05838">pdf</a>, <a href="https://arxiv.org/format/2211.05838">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Hardware Architecture">cs.AR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> </div> </div> <p class="title is-5 mathjax"> DRAM Bender: An Extensible and Versatile FPGA-based Infrastructure to Easily Test State-of-the-art DRAM Chips </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Olgun%2C+A">Ataberk Olgun</a>, <a href="/search/cs?searchtype=author&query=Hassan%2C+H">Hasan Hassan</a>, <a href="/search/cs?searchtype=author&query=Ya%C4%9Fl%C4%B1k%C3%A7%C4%B1%2C+A+G">A. 
Giray Yağlıkçı</a>, <a href="/search/cs?searchtype=author&query=Tu%C4%9Frul%2C+Y+C">Yahya Can Tuğrul</a>, <a href="/search/cs?searchtype=author&query=Orosa%2C+L">Lois Orosa</a>, <a href="/search/cs?searchtype=author&query=Luo%2C+H">Haocong Luo</a>, <a href="/search/cs?searchtype=author&query=Patel%2C+M">Minesh Patel</a>, <a href="/search/cs?searchtype=author&query=Ergin%2C+O">Oğuz Ergin</a>, <a href="/search/cs?searchtype=author&query=Mutlu%2C+O">Onur Mutlu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2211.05838v5-abstract-short" style="display: inline;"> To understand and improve DRAM performance, reliability, security and energy efficiency, prior works study characteristics of commodity DRAM chips. Unfortunately, state-of-the-art open source infrastructures capable of conducting such studies are obsolete, poorly supported, or difficult to use, or their inflexibility limit the types of studies they can conduct. We propose DRAM Bender, a new FPGA… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.05838v5-abstract-full').style.display = 'inline'; document.getElementById('2211.05838v5-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2211.05838v5-abstract-full" style="display: none;"> To understand and improve DRAM performance, reliability, security and energy efficiency, prior works study characteristics of commodity DRAM chips. Unfortunately, state-of-the-art open source infrastructures capable of conducting such studies are obsolete, poorly supported, or difficult to use, or their inflexibility limit the types of studies they can conduct. We propose DRAM Bender, a new FPGA-based infrastructure that enables experimental studies on state-of-the-art DRAM chips. DRAM Bender offers three key features at the same time. 
First, DRAM Bender enables directly interfacing with a DRAM chip through its low-level interface. This allows users to issue DRAM commands in arbitrary order and with finer-grained time intervals compared to other open source infrastructures. Second, DRAM Bender exposes easy-to-use C++ and Python programming interfaces, allowing users to quickly and easily develop different types of DRAM experiments. Third, DRAM Bender is easily extensible. The modular design of DRAM Bender allows extending it to (i) support existing and emerging DRAM interfaces, and (ii) run on new commercial or custom FPGA boards with little effort. To demonstrate that DRAM Bender is a versatile infrastructure, we conduct three case studies, two of which lead to new observations about the DRAM RowHammer vulnerability. In particular, we show that data patterns supported by DRAM Bender uncovers a larger set of bit-flips on a victim row compared to the data patterns commonly used by prior work. We demonstrate the extensibility of DRAM Bender by implementing it on five different FPGAs with DDR4 and DDR3 support. DRAM Bender is freely and openly available at https://github.com/CMU-SAFARI/DRAM-Bender. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2211.05838v5-abstract-full').style.display = 'none'; document.getElementById('2211.05838v5-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 12 September, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 10 November, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2022. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Extended version of paper that is to appear in IEEE Transactions on Computer-Aided Design of Integrated Circuits and Systems (TCAD)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2209.10198">arXiv:2209.10198</a> <span> [<a href="https://arxiv.org/pdf/2209.10198">pdf</a>, <a href="https://arxiv.org/format/2209.10198">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Hardware Architecture">cs.AR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> </div> </div> <p class="title is-5 mathjax"> HiRA: Hidden Row Activation for Reducing Refresh Latency of Off-the-Shelf DRAM Chips </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Ya%C4%9Fl%C4%B1k%C3%A7%C4%B1%2C+A+G">Abdullah Giray Yağlıkçı</a>, <a href="/search/cs?searchtype=author&query=Olgun%2C+A">Ataberk Olgun</a>, <a href="/search/cs?searchtype=author&query=Patel%2C+M">Minesh Patel</a>, <a href="/search/cs?searchtype=author&query=Luo%2C+H">Haocong Luo</a>, <a href="/search/cs?searchtype=author&query=Hassan%2C+H">Hasan Hassan</a>, <a href="/search/cs?searchtype=author&query=Orosa%2C+L">Lois Orosa</a>, <a href="/search/cs?searchtype=author&query=Ergin%2C+O">Oğuz Ergin</a>, <a href="/search/cs?searchtype=author&query=Mutlu%2C+O">Onur Mutlu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2209.10198v1-abstract-short" style="display: inline;"> DRAM is the building block of modern main memory systems. 
DRAM cells must be periodically refreshed to prevent data loss. Refresh operations degrade system performance by interfering with memory accesses. As DRAM chip density increases with technology node scaling, refresh operations also increase because: 1) the number of DRAM rows in a chip increases; and 2) DRAM cells need additional refresh op… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2209.10198v1-abstract-full').style.display = 'inline'; document.getElementById('2209.10198v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2209.10198v1-abstract-full" style="display: none;"> DRAM is the building block of modern main memory systems. DRAM cells must be periodically refreshed to prevent data loss. Refresh operations degrade system performance by interfering with memory accesses. As DRAM chip density increases with technology node scaling, refresh operations also increase because: 1) the number of DRAM rows in a chip increases; and 2) DRAM cells need additional refresh operations to mitigate bit failures caused by RowHammer, a failure mechanism that becomes worse with technology node scaling. Thus, it is critical to enable refresh operations at low performance overhead. To this end, we propose a new operation, Hidden Row Activation (HiRA), and the HiRA Memory Controller (HiRA-MC). HiRA hides a refresh operation's latency by refreshing a row concurrently with accessing or refreshing another row within the same bank. Unlike prior works, HiRA achieves this parallelism without any modifications to off-the-shelf DRAM chips. To do so, it leverages the new observation that two rows in the same bank can be activated without data loss if the rows are connected to different charge restoration circuitry. 
We experimentally demonstrate on 56 real off-the-shelf DRAM chips that HiRA can reliably parallelize a DRAM row's refresh operation with refresh or activation of any of the 32% of the rows within the same bank. By doing so, HiRA reduces the overall latency of two refresh operations by 51.4%. HiRA-MC modifies the memory request scheduler to perform HiRA when a refresh operation can be performed concurrently with a memory access or another refresh. Our system-level evaluations show that HiRA-MC increases system performance by 12.6% and 3.73x as it reduces the performance degradation due to periodic refreshes and refreshes for RowHammer protection (preventive refreshes), respectively, for future DRAM chips with increased density and RowHammer vulnerability. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2209.10198v1-abstract-full').style.display = 'none'; document.getElementById('2209.10198v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 September, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2022. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">To appear in the 55th IEEE/ACM International Symposium on Microarchitecture (MICRO), 2022</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2209.08939">arXiv:2209.08939</a> <span> [<a href="https://arxiv.org/pdf/2209.08939">pdf</a>, <a href="https://arxiv.org/format/2209.08939">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Image and Video Processing">eess.IV</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Computer Vision and Pattern Recognition">cs.CV</span> </div> </div> <p class="title is-5 mathjax"> 3D Cross-Pseudo Supervision (3D-CPS): A semi-supervised nnU-Net architecture for abdominal organ segmentation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Huang%2C+Y">Yongzhi Huang</a>, <a href="/search/cs?searchtype=author&query=Zhang%2C+H">Hanwen Zhang</a>, <a href="/search/cs?searchtype=author&query=Yan%2C+Y">Yan Yan</a>, <a href="/search/cs?searchtype=author&query=Hassan%2C+H">Haseeb Hassan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2209.08939v2-abstract-short" style="display: inline;"> Large curated datasets are necessary, but annotating medical images is a time-consuming, laborious, and expensive process. Therefore, recent supervised methods are focusing on utilizing a large amount of unlabeled data. However, to do so, is a challenging task. 
To address this problem, we propose a new 3D Cross-Pseudo Supervision (3D-CPS) method, a semi-supervised network architecture based on nnU… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2209.08939v2-abstract-full').style.display = 'inline'; document.getElementById('2209.08939v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2209.08939v2-abstract-full" style="display: none;"> Large curated datasets are necessary, but annotating medical images is a time-consuming, laborious, and expensive process. Therefore, recent supervised methods are focusing on utilizing a large amount of unlabeled data. However, to do so, is a challenging task. To address this problem, we propose a new 3D Cross-Pseudo Supervision (3D-CPS) method, a semi-supervised network architecture based on nnU-Net with the Cross-Pseudo Supervision method. We design a new nnU-Net based preprocessing. In addition, we set the semi-supervised loss weights to expand linearity with each epoch to prevent the model from low-quality pseudo-labels in the early training process. Our proposed method achieves an average dice similarity coefficient (DSC) of 0.881 and an average normalized surface distance (NSD) of 0.913 on the MICCAI FLARE2022 validation set (20 cases). <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2209.08939v2-abstract-full').style.display = 'none'; document.getElementById('2209.08939v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 November, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 19 September, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2022. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">14 pages, 5 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2208.10993">arXiv:2208.10993</a> <span> [<a href="https://arxiv.org/pdf/2208.10993">pdf</a>, <a href="https://arxiv.org/format/2208.10993">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Application of federated learning techniques for arrhythmia classification using 12-lead ECG signals </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Gutierrez%2C+D+M+J">Daniel Mauricio Jimenez Gutierrez</a>, <a href="/search/cs?searchtype=author&query=Hassan%2C+H+M">Hafiz Muuhammad Hassan</a>, <a href="/search/cs?searchtype=author&query=Landi%2C+L">Lorella Landi</a>, <a href="/search/cs?searchtype=author&query=Vitaletti%2C+A">Andrea Vitaletti</a>, <a href="/search/cs?searchtype=author&query=Chatzigiannakis%2C+I">Ioannis Chatzigiannakis</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2208.10993v3-abstract-short" style="display: inline;"> Artificial Intelligence-based (AI) analysis of large, curated medical datasets is promising for providing early detection, faster diagnosis, and more effective treatment using low-power Electrocardiography (ECG) monitoring devices information. 
However, accessing sensitive medical data from diverse sources is highly restricted since improper use, unsafe storage, or data leakage could violate a pers… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2208.10993v3-abstract-full').style.display = 'inline'; document.getElementById('2208.10993v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2208.10993v3-abstract-full" style="display: none;"> Artificial Intelligence-based (AI) analysis of large, curated medical datasets is promising for providing early detection, faster diagnosis, and more effective treatment using low-power Electrocardiography (ECG) monitoring devices information. However, accessing sensitive medical data from diverse sources is highly restricted since improper use, unsafe storage, or data leakage could violate a person's privacy. This work uses a Federated Learning (FL) privacy-preserving methodology to train AI models over heterogeneous sets of high-definition ECG from 12-lead sensor arrays collected from six heterogeneous sources. We evaluated the capacity of the resulting models to achieve equivalent performance compared to state-of-the-art models trained in a Centralized Learning (CL) fashion. Moreover, we assessed the performance of our solution over Independent and Identical distributed (IID) and non-IID federated data. Our methodology involves machine learning techniques based on Deep Neural Networks and Long-Short-Term Memory models. It has a robust data preprocessing pipeline with feature engineering, selection, and data balancing techniques. Our AI models demonstrated comparable performance to models trained using CL, IID, and non-IID approaches. They showcased advantages in reduced complexity and faster training time, making them well-suited for cloud-edge architectures. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2208.10993v3-abstract-full').style.display = 'none'; document.getElementById('2208.10993v3-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 January, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 23 August, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Preprint of International Symposium on Algorithmic Aspects of Cloud Computing (ALGOCLOUD) 2023</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2207.13795">arXiv:2207.13795</a> <span> [<a href="https://arxiv.org/pdf/2207.13795">pdf</a>, <a href="https://arxiv.org/format/2207.13795">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Hardware Architecture">cs.AR</span> </div> </div> <p class="title is-5 mathjax"> Sectored DRAM: A Practical Energy-Efficient and High-Performance Fine-Grained DRAM Architecture </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Olgun%2C+A">Ataberk Olgun</a>, <a href="/search/cs?searchtype=author&query=Bostanci%2C+F+N">F. Nisa Bostanci</a>, <a href="/search/cs?searchtype=author&query=Oliveira%2C+G+F">Geraldo F. Oliveira</a>, <a href="/search/cs?searchtype=author&query=Tugrul%2C+Y+C">Yahya Can Tugrul</a>, <a href="/search/cs?searchtype=author&query=Bera%2C+R">Rahul Bera</a>, <a href="/search/cs?searchtype=author&query=Yaglikci%2C+A+G">A. 
Giray Yaglikci</a>, <a href="/search/cs?searchtype=author&query=Hassan%2C+H">Hasan Hassan</a>, <a href="/search/cs?searchtype=author&query=Ergin%2C+O">Oguz Ergin</a>, <a href="/search/cs?searchtype=author&query=Mutlu%2C+O">Onur Mutlu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2207.13795v4-abstract-short" style="display: inline;"> We propose Sectored DRAM, a new, low-overhead DRAM substrate that reduces wasted energy by enabling fine-grained DRAM data transfers and DRAM row activation. Sectored DRAM leverages two key ideas to enable fine-grained data transfers and row activation at low chip area cost. First, a cache block transfer between main memory and the memory controller happens in a fixed number of clock cycles where… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2207.13795v4-abstract-full').style.display = 'inline'; document.getElementById('2207.13795v4-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2207.13795v4-abstract-full" style="display: none;"> We propose Sectored DRAM, a new, low-overhead DRAM substrate that reduces wasted energy by enabling fine-grained DRAM data transfers and DRAM row activation. Sectored DRAM leverages two key ideas to enable fine-grained data transfers and row activation at low chip area cost. First, a cache block transfer between main memory and the memory controller happens in a fixed number of clock cycles where only a small portion of the cache block (a word) is transferred in each cycle. Sectored DRAM augments the memory controller and the DRAM chip to execute cache block transfers in a variable number of clock cycles based on the workload access pattern with minor modifications to the memory controller's and the DRAM chip's circuitry. 
Second, a large DRAM row, by design, is already partitioned into smaller independent physically isolated regions. Sectored DRAM provides the memory controller with the ability to activate each such region based on the workload access pattern via small modifications to the DRAM chip's array access circuitry. Activating smaller regions of a large row relaxes DRAM power delivery constraints and allows the memory controller to schedule DRAM accesses faster. Compared to a system with coarse-grained DRAM, Sectored DRAM reduces the DRAM energy consumption of highly-memory-intensive workloads by up to (on average) 33% (20%) while improving their performance by up to (on average) 36% (17%). Sectored DRAM's DRAM energy savings, combined with its system performance improvement, allows system-wide energy savings of up to 23%. Sectored DRAM's DRAM chip area overhead is 1.7% the area of a modern DDR4 chip. We hope and believe that Sectored DRAM's ideas and results will help to enable more efficient and high-performance memory systems. To this end, we open source Sectored DRAM at https://github.com/CMU-SAFARI/Sectored-DRAM. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2207.13795v4-abstract-full').style.display = 'none'; document.getElementById('2207.13795v4-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 9 June, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 27 July, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2022. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Extended version of paper that is to appear in ACM Transactions on Architecture and Code Optimization (ACM TACO)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2207.13358">arXiv:2207.13358</a> <span> [<a href="https://arxiv.org/pdf/2207.13358">pdf</a>, <a href="https://arxiv.org/format/2207.13358">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Hardware Architecture">cs.AR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> </div> </div> <p class="title is-5 mathjax"> Self-Managing DRAM: A Low-Cost Framework for Enabling Autonomous and Efficient in-DRAM Operations </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Hassan%2C+H">Hasan Hassan</a>, <a href="/search/cs?searchtype=author&query=Olgun%2C+A">Ataberk Olgun</a>, <a href="/search/cs?searchtype=author&query=Yaglikci%2C+A+G">A. Giray Yaglikci</a>, <a href="/search/cs?searchtype=author&query=Luo%2C+H">Haocong Luo</a>, <a href="/search/cs?searchtype=author&query=Mutlu%2C+O">Onur Mutlu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2207.13358v8-abstract-short" style="display: inline;"> The memory controller is in charge of managing DRAM maintenance operations (e.g., refresh, RowHammer protection, memory scrubbing) to reliably operate modern DRAM chips. Implementing new maintenance operations often necessitates modifications in the DRAM interface, memory controller, and potentially other system components. 
Such modifications are only possible with a new DRAM standard, which takes… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2207.13358v8-abstract-full').style.display = 'inline'; document.getElementById('2207.13358v8-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2207.13358v8-abstract-full" style="display: none;"> The memory controller is in charge of managing DRAM maintenance operations (e.g., refresh, RowHammer protection, memory scrubbing) to reliably operate modern DRAM chips. Implementing new maintenance operations often necessitates modifications in the DRAM interface, memory controller, and potentially other system components. Such modifications are only possible with a new DRAM standard, which takes a long time to develop, likely leading to slow progress in the adoption of new architectural techniques in DRAM chips. We propose a new low-cost DRAM architecture, Self-Managing DRAM (SMD), that enables autonomous in-DRAM maintenance operations by transferring the responsibility for controlling maintenance operations from the memory controller to the SMD chip. To enable autonomous maintenance operations, we make a single modification to the DRAM interface, such that an SMD chip rejects memory controller accesses to DRAM regions under maintenance, while allowing memory accesses to others. Thus, SMD enables 1) implementing new in-DRAM maintenance mechanisms (or modifying existing ones) with no further changes in the DRAM interface or other system components, and 2) overlapping the latency of a maintenance operation in one DRAM region with the latency of accessing data in another. 
We evaluate SMD and show that it 1) can be implemented without adding new pins to the DDRx interface with low latency and area overhead, 2) achieves 4.1% average speedup across 20 four-core memory-intensive workloads over a DDR4-based system/DRAM co-design technique that intelligently parallelizes maintenance operations with memory accesses, and 3) guarantees forward progress for rejected memory accesses. We believe and hope SMD can enable innovations in DRAM architecture to rapidly come to fruition. We open source all SMD source code and data at https://github.com/CMU-SAFARI/SelfManagingDRAM. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2207.13358v8-abstract-full').style.display = 'none'; document.getElementById('2207.13358v8-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 October, 2024; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 27 July, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2022. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Extended version of MICRO 2024 paper titled "Self-Managing DRAM: A Low-Cost Framework for Enabling Autonomous and Efficient DRAM Maintenance Operations"</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2206.09999">arXiv:2206.09999</a> <span> [<a href="https://arxiv.org/pdf/2206.09999">pdf</a>, <a href="https://arxiv.org/format/2206.09999">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Hardware Architecture">cs.AR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> </div> </div> <p class="title is-5 mathjax"> Understanding RowHammer Under Reduced Wordline Voltage: An Experimental Study Using Real DRAM Devices </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Ya%C4%9Fl%C4%B1k%C3%A7%C4%B1%2C+A+G">A. Giray Yağlıkçı</a>, <a href="/search/cs?searchtype=author&query=Luo%2C+H">Haocong Luo</a>, <a href="/search/cs?searchtype=author&query=de+Oliviera%2C+G+F">Geraldo F. de Oliviera</a>, <a href="/search/cs?searchtype=author&query=Olgun%2C+A">Ataberk Olgun</a>, <a href="/search/cs?searchtype=author&query=Patel%2C+M">Minesh Patel</a>, <a href="/search/cs?searchtype=author&query=Park%2C+J">Jisung Park</a>, <a href="/search/cs?searchtype=author&query=Hassan%2C+H">Hasan Hassan</a>, <a href="/search/cs?searchtype=author&query=Kim%2C+J+S">Jeremie S. 
Kim</a>, <a href="/search/cs?searchtype=author&query=Orosa%2C+L">Lois Orosa</a>, <a href="/search/cs?searchtype=author&query=Mutlu%2C+O">Onur Mutlu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2206.09999v1-abstract-short" style="display: inline;"> RowHammer is a circuit-level DRAM vulnerability, where repeatedly activating and precharging a DRAM row, and thus alternating the voltage of a row's wordline between low and high voltage levels, can cause bit flips in physically nearby rows. Recent DRAM chips are more vulnerable to RowHammer: with technology node scaling, the minimum number of activate-precharge cycles to induce a RowHammer bit fl… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2206.09999v1-abstract-full').style.display = 'inline'; document.getElementById('2206.09999v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2206.09999v1-abstract-full" style="display: none;"> RowHammer is a circuit-level DRAM vulnerability, where repeatedly activating and precharging a DRAM row, and thus alternating the voltage of a row's wordline between low and high voltage levels, can cause bit flips in physically nearby rows. Recent DRAM chips are more vulnerable to RowHammer: with technology node scaling, the minimum number of activate-precharge cycles to induce a RowHammer bit flip reduces and the RowHammer bit error rate increases. Therefore, it is critical to develop effective and scalable approaches to protect modern DRAM systems against RowHammer. To enable such solutions, it is essential to develop a deeper understanding of the RowHammer vulnerability of modern DRAM chips. 
However, even though the voltage toggling on a wordline is a key determinant of RowHammer vulnerability, no prior work experimentally demonstrates the effect of wordline voltage (VPP) on the RowHammer vulnerability. Our work closes this gap in understanding. This is the first work to experimentally demonstrate on 272 real DRAM chips that lowering VPP reduces a DRAM chip's RowHammer vulnerability. We show that lowering VPP 1) increases the number of activate-precharge cycles needed to induce a RowHammer bit flip by up to 85.8% with an average of 7.4% across all tested chips and 2) decreases the RowHammer bit error rate by up to 66.9% with an average of 15.2% across all tested chips. At the same time, reducing VPP marginally worsens a DRAM cell's access latency, charge restoration, and data retention time within the guardbands of system-level nominal timing parameters for 208 out of 272 tested chips. We conclude that reducing VPP is a promising strategy for reducing a DRAM chip's RowHammer vulnerability without requiring modifications to DRAM chips. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2206.09999v1-abstract-full').style.display = 'none'; document.getElementById('2206.09999v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 20 June, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2022. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">To appear in DSN 2022</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2206.00263">arXiv:2206.00263</a> <span> [<a href="https://arxiv.org/pdf/2206.00263">pdf</a>, <a href="https://arxiv.org/format/2206.00263">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Hardware Architecture">cs.AR</span> </div> </div> <p class="title is-5 mathjax"> PiDRAM: An FPGA-based Framework for End-to-end Evaluation of Processing-in-DRAM Techniques </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Olgun%2C+A">Ataberk Olgun</a>, <a href="/search/cs?searchtype=author&query=Luna%2C+J+G">Juan Gomez Luna</a>, <a href="/search/cs?searchtype=author&query=Kanellopoulos%2C+K">Konstantinos Kanellopoulos</a>, <a href="/search/cs?searchtype=author&query=Salami%2C+B">Behzad Salami</a>, <a href="/search/cs?searchtype=author&query=Hassan%2C+H">Hasan Hassan</a>, <a href="/search/cs?searchtype=author&query=Ergin%2C+O">Oguz Ergin</a>, <a href="/search/cs?searchtype=author&query=Mutlu%2C+O">Onur Mutlu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2206.00263v1-abstract-short" style="display: inline;"> DRAM-based main memory is used in nearly all computing systems as a major component. One way of overcoming the main memory bottleneck is to move computation near memory, a paradigm known as processing-in-memory (PiM). Recent PiM techniques provide a promising way to improve the performance and energy efficiency of existing and future systems at no additional DRAM hardware cost. 
We develop the Pr… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2206.00263v1-abstract-full').style.display = 'inline'; document.getElementById('2206.00263v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2206.00263v1-abstract-full" style="display: none;"> DRAM-based main memory is used in nearly all computing systems as a major component. One way of overcoming the main memory bottleneck is to move computation near memory, a paradigm known as processing-in-memory (PiM). Recent PiM techniques provide a promising way to improve the performance and energy efficiency of existing and future systems at no additional DRAM hardware cost. We develop the Processing-in-DRAM (PiDRAM) framework, the first flexible, end-to-end, and open source framework that enables system integration studies and evaluation of real PiM techniques using real DRAM chips. We demonstrate a prototype of PiDRAM on an FPGA-based platform (Xilinx ZC706) that implements an open-source RISC-V system (Rocket Chip). To demonstrate the flexibility and ease of use of PiDRAM, we implement two PiM techniques: (1) RowClone, an in-DRAM copy and initialization mechanism (using command sequences proposed by ComputeDRAM), and (2) D-RaNGe, an in-DRAM true random number generator based on DRAM activation-latency failures. Our end-to-end evaluation of RowClone shows up to 14.6X speedup for copy and 12.6X initialization operations over CPU copy (i.e., conventional memcpy) and initialization (i.e., conventional calloc) operations. Our implementation of D-RaNGe provides high throughput true random numbers, reaching 8.30 Mb/s throughput. Over the Verilog and C++ basis provided by PiDRAM, implementing the required hardware and software components, implementing RowClone end-to-end takes 198 (565) and implementing D-RaNGe end-to-end takes 190 (78) lines of Verilog (C++) code. 
PiDRAM is open sourced on Github: https://github.com/CMU-SAFARI/PiDRAM. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2206.00263v1-abstract-full').style.display = 'none'; document.getElementById('2206.00263v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 June, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2022. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">To appear in ISVLSI 2022 Special Session on Processing in Memory. arXiv admin note: text overlap with arXiv:2111.00082</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2201.01385">arXiv:2201.01385</a> <span> [<a href="https://arxiv.org/pdf/2201.01385">pdf</a>, <a href="https://arxiv.org/format/2201.01385">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Hardware Architecture">cs.AR</span> </div> </div> <p class="title is-5 mathjax"> DR-STRaNGe: End-to-End System Design for DRAM-based True Random Number Generators </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Bostanc%C4%B1%2C+F+N">F. Nisa Bostancı</a>, <a href="/search/cs?searchtype=author&query=Olgun%2C+A">Ataberk Olgun</a>, <a href="/search/cs?searchtype=author&query=Orosa%2C+L">Lois Orosa</a>, <a href="/search/cs?searchtype=author&query=Ya%C4%9Fl%C4%B1k%C3%A7%C4%B1%2C+A+G">A. Giray Yağlıkçı</a>, <a href="/search/cs?searchtype=author&query=Kim%2C+J+S">Jeremie S. 
Kim</a>, <a href="/search/cs?searchtype=author&query=Hassan%2C+H">Hasan Hassan</a>, <a href="/search/cs?searchtype=author&query=Ergin%2C+O">Oğuz Ergin</a>, <a href="/search/cs?searchtype=author&query=Mutlu%2C+O">Onur Mutlu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2201.01385v5-abstract-short" style="display: inline;"> Random number generation is an important task in a wide variety of critical applications including cryptographic algorithms, scientific simulations, and industrial testing tools. True Random Number Generators (TRNGs) produce truly random data by sampling a physical entropy source that typically requires custom hardware and suffers from long latency. To enable high-bandwidth and low-latency TRNGs o… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2201.01385v5-abstract-full').style.display = 'inline'; document.getElementById('2201.01385v5-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2201.01385v5-abstract-full" style="display: none;"> Random number generation is an important task in a wide variety of critical applications including cryptographic algorithms, scientific simulations, and industrial testing tools. True Random Number Generators (TRNGs) produce truly random data by sampling a physical entropy source that typically requires custom hardware and suffers from long latency. To enable high-bandwidth and low-latency TRNGs on commodity devices, recent works propose TRNGs that use DRAM as an entropy source. Although prior works demonstrate promising DRAM-based TRNGs, integration of such mechanisms into real systems poses challenges. 
We identify three challenges for using DRAM-based TRNGs in current systems: (1) generating random numbers can degrade system performance by slowing down concurrently-running applications due to the interference between RNG and regular memory operations in the memory controller (i.e., RNG interference), (2) this RNG interference can degrade system fairness by unfairly prioritizing applications that intensively use random numbers (i.e., RNG applications), and (3) RNG applications can experience significant slowdowns due to the high RNG latency. We propose DR-STRaNGe, an end-to-end system design for DRAM-based TRNGs that (1) reduces the RNG interference by separating RNG requests from regular requests in the memory controller, (2) improves the system fairness with an RNG-aware memory request scheduler, and (3) hides the large TRNG latencies using a random number buffering mechanism with a new DRAM idleness predictor that accurately identifies idle DRAM periods. We evaluate DR-STRaNGe using a set of 186 multiprogrammed workloads. Compared to an RNG-oblivious baseline system, DR-STRaNGe improves the average performance of non-RNG and RNG applications by 17.9% and 25.1%, respectively. DR-STRaNGe improves average system fairness by 32.1% and reduces average energy consumption by 21%. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2201.01385v5-abstract-full').style.display = 'none'; document.getElementById('2201.01385v5-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 June, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 4 January, 2022; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> January 2022. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2111.00082">arXiv:2111.00082</a> <span> [<a href="https://arxiv.org/pdf/2111.00082">pdf</a>, <a href="https://arxiv.org/format/2111.00082">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Hardware Architecture">cs.AR</span> </div> </div> <p class="title is-5 mathjax"> PiDRAM: A Holistic End-to-end FPGA-based Framework for Processing-in-DRAM </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Olgun%2C+A">Ataberk Olgun</a>, <a href="/search/cs?searchtype=author&query=Luna%2C+J+G">Juan Gómez Luna</a>, <a href="/search/cs?searchtype=author&query=Kanellopoulos%2C+K">Konstantinos Kanellopoulos</a>, <a href="/search/cs?searchtype=author&query=Salami%2C+B">Behzad Salami</a>, <a href="/search/cs?searchtype=author&query=Hassan%2C+H">Hasan Hassan</a>, <a href="/search/cs?searchtype=author&query=Ergin%2C+O">Oğuz Ergin</a>, <a href="/search/cs?searchtype=author&query=Mutlu%2C+O">Onur Mutlu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2111.00082v6-abstract-short" style="display: inline;"> Processing-using-memory (PuM) techniques leverage the analog operation of memory cells to perform computation. Several recent works have demonstrated PuM techniques in off-the-shelf DRAM devices. Since DRAM is the dominant memory technology as main memory in current computing systems, these PuM techniques represent an opportunity for alleviating the data movement bottleneck at very low cost. 
Howev… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2111.00082v6-abstract-full').style.display = 'inline'; document.getElementById('2111.00082v6-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2111.00082v6-abstract-full" style="display: none;"> Processing-using-memory (PuM) techniques leverage the analog operation of memory cells to perform computation. Several recent works have demonstrated PuM techniques in off-the-shelf DRAM devices. Since DRAM is the dominant memory technology as main memory in current computing systems, these PuM techniques represent an opportunity for alleviating the data movement bottleneck at very low cost. However, system integration of PuM techniques imposes non-trivial challenges that are yet to be solved. Design space exploration of potential solutions to the PuM integration challenges requires appropriate tools to develop necessary hardware and software components. Unfortunately, current specialized DRAM-testing platforms, or system simulators do not provide the flexibility and/or the holistic system view that is necessary to deal with PuM integration challenges. We design and develop PiDRAM, the first flexible end-to-end framework that enables system integration studies and evaluation of real PuM techniques. PiDRAM provides software and hardware components to rapidly integrate PuM techniques across the whole system software and hardware stack (e.g., necessary modifications in the operating system, memory controller). We implement PiDRAM on an FPGA-based platform along with an open-source RISC-V system. Using PiDRAM, we implement and evaluate two state-of-the-art PuM techniques: in-DRAM (i) copy and initialization, (ii) true random number generation. 
Our results show that the in-memory copy and initialization techniques can improve the performance of bulk copy operations by 12.6x and bulk initialization operations by 14.6x on a real system. Implementing the true random number generator requires only 190 lines of Verilog and 74 lines of C code using PiDRAM's software and hardware components. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2111.00082v6-abstract-full').style.display = 'none'; document.getElementById('2111.00082v6-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 September, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 29 October, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> November 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">To appear in ACM Transactions on Architecture and Code Optimization</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2110.10603">arXiv:2110.10603</a> <span> [<a href="https://arxiv.org/pdf/2110.10603">pdf</a>, <a href="https://arxiv.org/format/2110.10603">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Hardware Architecture">cs.AR</span> </div> </div> <p class="title is-5 mathjax"> Uncovering In-DRAM RowHammer Protection Mechanisms: A New Methodology, Custom RowHammer Patterns, and Implications </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Hassan%2C+H">Hasan Hassan</a>, <a 
href="/search/cs?searchtype=author&query=Tugrul%2C+Y+C">Yahya Can Tugrul</a>, <a href="/search/cs?searchtype=author&query=Kim%2C+J+S">Jeremie S. Kim</a>, <a href="/search/cs?searchtype=author&query=van+der+Veen%2C+V">Victor van der Veen</a>, <a href="/search/cs?searchtype=author&query=Razavi%2C+K">Kaveh Razavi</a>, <a href="/search/cs?searchtype=author&query=Mutlu%2C+O">Onur Mutlu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2110.10603v2-abstract-short" style="display: inline;"> The RowHammer vulnerability in DRAM is a critical threat to system security. To protect against RowHammer, vendors commit to security-through-obscurity: modern DRAM chips rely on undocumented, proprietary, on-die mitigations, commonly known as Target Row Refresh (TRR). At a high level, TRR detects and refreshes potential RowHammer-victim rows, but its exact implementations are not openly disclosed… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.10603v2-abstract-full').style.display = 'inline'; document.getElementById('2110.10603v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2110.10603v2-abstract-full" style="display: none;"> The RowHammer vulnerability in DRAM is a critical threat to system security. To protect against RowHammer, vendors commit to security-through-obscurity: modern DRAM chips rely on undocumented, proprietary, on-die mitigations, commonly known as Target Row Refresh (TRR). At a high level, TRR detects and refreshes potential RowHammer-victim rows, but its exact implementations are not openly disclosed. Security guarantees of TRR mechanisms cannot be easily studied due to their proprietary nature. 
To assess the security guarantees of recent DRAM chips, we present Uncovering TRR (U-TRR), an experimental methodology to analyze in-DRAM TRR implementations. U-TRR is based on the new observation that data retention failures in DRAM enable a side channel that leaks information on how TRR refreshes potential victim rows. U-TRR allows us to (i) understand how logical DRAM rows are laid out physically in silicon; (ii) study undocumented on-die TRR mechanisms; and (iii) combine (i) and (ii) to evaluate the RowHammer security guarantees of modern DRAM chips. We show how U-TRR allows us to craft RowHammer access patterns that successfully circumvent the TRR mechanisms employed in 45 DRAM modules of the three major DRAM vendors. We find that the DRAM modules we analyze are vulnerable to RowHammer, having bit flips in up to 99.9% of all DRAM rows. We make U-TRR source code openly and freely available at [106]. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.10603v2-abstract-full').style.display = 'none'; document.getElementById('2110.10603v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 October, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 20 October, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2021. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">This work is to appear at the 54th IEEE/ACM International Symposium on Microarchitecture (MICRO 2021)</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2110.10291">arXiv:2110.10291</a> <span> [<a href="https://arxiv.org/pdf/2110.10291">pdf</a>, <a href="https://arxiv.org/format/2110.10291">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Hardware Architecture">cs.AR</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1145/3466752.3480069">10.1145/3466752.3480069 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> A Deeper Look into RowHammer's Sensitivities: Experimental Analysis of Real DRAM Chips and Implications on Future Attacks and Defenses </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Orosa%2C+L">Lois Orosa</a>, <a href="/search/cs?searchtype=author&query=Ya%C4%9Fl%C4%B1k%C3%A7%C4%B1%2C+A+G">Abdullah Giray Yağlıkçı</a>, <a href="/search/cs?searchtype=author&query=Luo%2C+H">Haocong Luo</a>, <a href="/search/cs?searchtype=author&query=Olgun%2C+A">Ataberk Olgun</a>, <a href="/search/cs?searchtype=author&query=Park%2C+J">Jisung Park</a>, <a href="/search/cs?searchtype=author&query=Hassan%2C+H">Hasan Hassan</a>, <a href="/search/cs?searchtype=author&query=Patel%2C+M">Minesh Patel</a>, <a 
href="/search/cs?searchtype=author&query=Kim%2C+J+S">Jeremie S. Kim</a>, <a href="/search/cs?searchtype=author&query=Mutlu%2C+O">Onur Mutlu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2110.10291v1-abstract-short" style="display: inline;"> RowHammer is a circuit-level DRAM vulnerability where repeatedly accessing (i.e., hammering) a DRAM row can cause bit flips in physically nearby rows. The RowHammer vulnerability worsens as DRAM cell size and cell-to-cell spacing shrink. Recent studies demonstrate that modern DRAM chips, including chips previously marketed as RowHammer-safe, are even more vulnerable to RowHammer than older chips s… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.10291v1-abstract-full').style.display = 'inline'; document.getElementById('2110.10291v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2110.10291v1-abstract-full" style="display: none;"> RowHammer is a circuit-level DRAM vulnerability where repeatedly accessing (i.e., hammering) a DRAM row can cause bit flips in physically nearby rows. The RowHammer vulnerability worsens as DRAM cell size and cell-to-cell spacing shrink. Recent studies demonstrate that modern DRAM chips, including chips previously marketed as RowHammer-safe, are even more vulnerable to RowHammer than older chips such that the required hammer count to cause a bit flip has reduced by more than 10X in the last decade. Therefore, it is essential to develop a better understanding and in-depth insights into the RowHammer vulnerability of modern DRAM chips to more effectively secure current and future systems. 
Our goal in this paper is to provide insights into fundamental properties of the RowHammer vulnerability that are not yet rigorously studied by prior works, but can potentially be $i$) exploited to develop more effective RowHammer attacks or $ii$) leveraged to design more effective and efficient defense mechanisms. To this end, we present an experimental characterization using 248~DDR4 and 24~DDR3 modern DRAM chips from four major DRAM manufacturers demonstrating how the RowHammer effects vary with three fundamental properties: 1)~DRAM chip temperature, 2)~aggressor row active time, and 3)~victim DRAM cell's physical location. Among our 16 new observations, we highlight that a RowHammer bit flip 1)~is very likely to occur in a bounded range, specific to each DRAM cell (e.g., 5.4% of the vulnerable DRAM cells exhibit errors in the range 70C to 90C), 2)~is more likely to occur if the aggressor row is active for longer time (e.g., RowHammer vulnerability increases by 36% if we keep a DRAM row active for 15 column accesses), and 3)~is more likely to occur in certain physical regions of the DRAM module under attack (e.g., 5% of the rows are 2x more vulnerable than the remaining 95% of the rows). <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.10291v1-abstract-full').style.display = 'none'; document.getElementById('2110.10291v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 19 October, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2021. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">A shorter version of this work is to appear at the 54th Annual IEEE/ACM International Symposium on Microarchitecture (MICRO-54), 2021</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2110.04260">arXiv:2110.04260</a> <span> [<a href="https://arxiv.org/pdf/2110.04260">pdf</a>, <a href="https://arxiv.org/format/2110.04260">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Taming Sparsely Activated Transformer with Stochastic Experts </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Zuo%2C+S">Simiao Zuo</a>, <a href="/search/cs?searchtype=author&query=Liu%2C+X">Xiaodong Liu</a>, <a href="/search/cs?searchtype=author&query=Jiao%2C+J">Jian Jiao</a>, <a href="/search/cs?searchtype=author&query=Kim%2C+Y+J">Young Jin Kim</a>, <a href="/search/cs?searchtype=author&query=Hassan%2C+H">Hany Hassan</a>, <a href="/search/cs?searchtype=author&query=Zhang%2C+R">Ruofei Zhang</a>, <a href="/search/cs?searchtype=author&query=Zhao%2C+T">Tuo Zhao</a>, <a href="/search/cs?searchtype=author&query=Gao%2C+J">Jianfeng Gao</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2110.04260v3-abstract-short" style="display: inline;"> Sparsely activated models (SAMs), such as Mixture-of-Experts (MoE), can easily scale to have outrageously large amounts of parameters without significant increase in computational cost. 
However, SAMs are reported to be parameter inefficient such that larger models do not always lead to better performance. While most on-going research focuses on improving SAMs models by exploring methods of routing… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.04260v3-abstract-full').style.display = 'inline'; document.getElementById('2110.04260v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2110.04260v3-abstract-full" style="display: none;"> Sparsely activated models (SAMs), such as Mixture-of-Experts (MoE), can easily scale to have outrageously large amounts of parameters without significant increase in computational cost. However, SAMs are reported to be parameter inefficient such that larger models do not always lead to better performance. While most on-going research focuses on improving SAMs models by exploring methods of routing inputs to experts, our analysis reveals that such research might not lead to the solution we expect, i.e., the commonly-used routing methods based on gating mechanisms do not work better than randomly routing inputs to experts. In this paper, we propose a new expert-based model, THOR (Transformer witH StOchastic ExpeRts). Unlike classic expert-based models, such as the Switch Transformer, experts in THOR are randomly activated for each input during training and inference. THOR models are trained using a consistency regularized loss, where experts learn not only from training data but also from other experts as teachers, such that all the experts make consistent predictions. We validate the effectiveness of THOR on machine translation tasks. Results show that THOR models are more parameter efficient in that they significantly outperform the Transformer and MoE models across various settings. 
For example, in multilingual translation, THOR outperforms the Switch Transformer by 2 BLEU scores, and obtains the same BLEU score as that of a state-of-the-art MoE model that is 18 times larger. Our code is publicly available at: https://github.com/microsoft/Stochastic-Mixture-of-Experts. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2110.04260v3-abstract-full').style.display = 'none'; document.getElementById('2110.04260v3-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 February, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 8 October, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">ICLR 2022</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2109.04778">arXiv:2109.04778</a> <span> [<a href="https://arxiv.org/pdf/2109.04778">pdf</a>, <a href="https://arxiv.org/format/2109.04778">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Artificial Intelligence">cs.AI</span> </div> </div> <p class="title is-5 mathjax"> Improving Multilingual Translation by Representation and Gradient Regularization </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Yang%2C+Y">Yilin Yang</a>, <a href="/search/cs?searchtype=author&query=Eriguchi%2C+A">Akiko Eriguchi</a>, <a href="/search/cs?searchtype=author&query=Muzio%2C+A">Alexandre Muzio</a>, <a 
href="/search/cs?searchtype=author&query=Tadepalli%2C+P">Prasad Tadepalli</a>, <a href="/search/cs?searchtype=author&query=Lee%2C+S">Stefan Lee</a>, <a href="/search/cs?searchtype=author&query=Hassan%2C+H">Hany Hassan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2109.04778v2-abstract-short" style="display: inline;"> Multilingual Neural Machine Translation (NMT) enables one model to serve all translation directions, including ones that are unseen during training, i.e. zero-shot translation. Despite being theoretically attractive, current models often produce low quality translations -- commonly failing to even produce outputs in the right target language. In this work, we observe that off-target translation is… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2109.04778v2-abstract-full').style.display = 'inline'; document.getElementById('2109.04778v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2109.04778v2-abstract-full" style="display: none;"> Multilingual Neural Machine Translation (NMT) enables one model to serve all translation directions, including ones that are unseen during training, i.e. zero-shot translation. Despite being theoretically attractive, current models often produce low quality translations -- commonly failing to even produce outputs in the right target language. In this work, we observe that off-target translation is dominant even in strong multilingual systems, trained on massive multilingual corpora. To address this issue, we propose a joint approach to regularize NMT models at both representation-level and gradient-level. At the representation level, we leverage an auxiliary target language prediction task to regularize decoder outputs to retain information about the target language. 
At the gradient level, we leverage a small amount of direct data (in thousands of sentence pairs) to regularize model gradients. Our results demonstrate that our approach is highly effective in both reducing off-target translation occurrences and improving zero-shot translation performance by +5.59 and +10.38 BLEU on WMT and OPUS datasets respectively. Moreover, experiments show that our method also works well when the small amount of direct data is not available. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2109.04778v2-abstract-full').style.display = 'none'; document.getElementById('2109.04778v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 18 January, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 10 September, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">EMNLP 2021 (Oral). 
Code and data: https://github.com/yilinyang7/fairseq_multi_fix</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2109.00271">arXiv:2109.00271</a> <span> [<a href="https://arxiv.org/pdf/2109.00271">pdf</a>, <a href="https://arxiv.org/format/2109.00271">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Discovering Representation Sprachbund For Multilingual Pre-Training </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Fan%2C+Y">Yimin Fan</a>, <a href="/search/cs?searchtype=author&query=Liang%2C+Y">Yaobo Liang</a>, <a href="/search/cs?searchtype=author&query=Muzio%2C+A">Alexandre Muzio</a>, <a href="/search/cs?searchtype=author&query=Hassan%2C+H">Hany Hassan</a>, <a href="/search/cs?searchtype=author&query=Li%2C+H">Houqiang Li</a>, <a href="/search/cs?searchtype=author&query=Zhou%2C+M">Ming Zhou</a>, <a href="/search/cs?searchtype=author&query=Duan%2C+N">Nan Duan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2109.00271v1-abstract-short" style="display: inline;"> Multilingual pre-trained models have demonstrated their effectiveness in many multilingual NLP tasks and enabled zero-shot or few-shot transfer from high-resource languages to low resource ones. 
However, due to significant typological differences and contradictions between some languages, such models usually perform poorly on many languages and cross-lingual settings, which shows the difficulty of… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2109.00271v1-abstract-full').style.display = 'inline'; document.getElementById('2109.00271v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2109.00271v1-abstract-full" style="display: none;"> Multilingual pre-trained models have demonstrated their effectiveness in many multilingual NLP tasks and enabled zero-shot or few-shot transfer from high-resource languages to low resource ones. However, due to significant typological differences and contradictions between some languages, such models usually perform poorly on many languages and cross-lingual settings, which shows the difficulty of learning a single model to handle massive diverse languages well at the same time. To alleviate this issue, we present a new multilingual pre-training pipeline. We propose to generate language representation from multilingual pre-trained models and conduct linguistic analysis to show that language representation similarity reflect linguistic similarity from multiple perspectives, including language family, geographical sprachbund, lexicostatistics and syntax. Then we cluster all the target languages into multiple groups and name each group as a representation sprachbund. Thus, languages in the same representation sprachbund are supposed to boost each other in both pre-training and fine-tuning as they share rich linguistic similarity. We pre-train one multilingual model for each representation sprachbund. Experiments are conducted on cross-lingual benchmarks and significant improvements are achieved compared to strong baselines. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2109.00271v1-abstract-full').style.display = 'none'; document.getElementById('2109.00271v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 1 September, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">To Appear at the Findings of EMNLP2021</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2106.05632">arXiv:2106.05632</a> <span> [<a href="https://arxiv.org/pdf/2106.05632">pdf</a>, <a href="https://arxiv.org/format/2106.05632">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Hardware Architecture">cs.AR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> </div> </div> <p class="title is-5 mathjax"> CODIC: A Low-Cost Substrate for Enabling Custom In-DRAM Functionalities and Optimizations </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Orosa%2C+L">Lois Orosa</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+Y">Yaohua Wang</a>, <a href="/search/cs?searchtype=author&query=Sadrosadati%2C+M">Mohammad Sadrosadati</a>, <a href="/search/cs?searchtype=author&query=Kim%2C+J+S">Jeremie S. 
Kim</a>, <a href="/search/cs?searchtype=author&query=Patel%2C+M">Minesh Patel</a>, <a href="/search/cs?searchtype=author&query=Puddu%2C+I">Ivan Puddu</a>, <a href="/search/cs?searchtype=author&query=Luo%2C+H">Haocong Luo</a>, <a href="/search/cs?searchtype=author&query=Razavi%2C+K">Kaveh Razavi</a>, <a href="/search/cs?searchtype=author&query=G%C3%B3mez-Luna%2C+J">Juan Gómez-Luna</a>, <a href="/search/cs?searchtype=author&query=Hassan%2C+H">Hasan Hassan</a>, <a href="/search/cs?searchtype=author&query=Mansouri-Ghiasi%2C+N">Nika Mansouri-Ghiasi</a>, <a href="/search/cs?searchtype=author&query=Ghose%2C+S">Saugata Ghose</a>, <a href="/search/cs?searchtype=author&query=Mutlu%2C+O">Onur Mutlu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2106.05632v1-abstract-short" style="display: inline;"> DRAM is the dominant main memory technology used in modern computing systems. Computing systems implement a memory controller that interfaces with DRAM via DRAM commands. DRAM executes the given commands using internal components (e.g., access transistors, sense amplifiers) that are orchestrated by DRAM internal timings, which are fixed for each DRAM command. Unfortunately, the use of fixed interna… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2106.05632v1-abstract-full').style.display = 'inline'; document.getElementById('2106.05632v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2106.05632v1-abstract-full" style="display: none;"> DRAM is the dominant main memory technology used in modern computing systems. Computing systems implement a memory controller that interfaces with DRAM via DRAM commands. 
DRAM executes the given commands using internal components (e.g., access transistors, sense amplifiers) that are orchestrated by DRAM internal timings, which are fixed for each DRAM command. Unfortunately, the use of fixed internal timings limits the types of operations that DRAM can perform and hinders the implementation of new functionalities and custom mechanisms that improve DRAM reliability, performance and energy. To overcome these limitations, we propose enabling programmable DRAM internal timings for controlling in-DRAM components. To this end, we design CODIC, a new low-cost DRAM substrate that enables fine-grained control over four previously fixed internal DRAM timings that are key to many DRAM operations. We implement CODIC with only minimal changes to the DRAM chip and the DDRx interface. To demonstrate the potential of CODIC, we propose two new CODIC-based security mechanisms that outperform state-of-the-art mechanisms in several ways: (1) a new DRAM Physical Unclonable Function (PUF) that is more robust and has significantly higher throughput than state-of-the-art DRAM PUFs, and (2) the first cold boot attack prevention mechanism that does not introduce any performance or energy overheads at runtime. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2106.05632v1-abstract-full').style.display = 'none'; document.getElementById('2106.05632v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 10 June, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2021. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">Extended version of an ISCA 2021 paper</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> B.3; K.6.5 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2106.04575">arXiv:2106.04575</a> <span> [<a href="https://arxiv.org/pdf/2106.04575">pdf</a>, <a href="https://arxiv.org/format/2106.04575">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> </div> </div> <p class="title is-5 mathjax"> DNS attack mitigation Using OpenStack Isolation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=hassan%2C+H+u">Hassnain ul hassan</a>, <a href="/search/cs?searchtype=author&query=Nor%2C+R+M">Rizal Mohd Nor</a>, <a href="/search/cs?searchtype=author&query=Amiruzzaman%2C+M">Md Amiruzzaman</a>, <a href="/search/cs?searchtype=author&query=Wani%2C+S">Sharyar Wani</a>, <a href="/search/cs?searchtype=author&query=Islam%2C+M+R">Md. Rajibul Islam</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2106.04575v3-abstract-short" style="display: inline;"> The Domain Name System (DNS) is essential for the Internet, giving a mechanism to resolve hostnames into Internet Protocol (IP) addresses. DNS is known as the world's largest distributed database that manages hostnames and Internet Protocol. 
By having the DNS, only simple names that can be easily memorized will be used and then the domain name system will map it into the numeric Internet Protocol… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2106.04575v3-abstract-full').style.display = 'inline'; document.getElementById('2106.04575v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2106.04575v3-abstract-full" style="display: none;"> The Domain Name System (DNS) is essential for the Internet, giving a mechanism to resolve hostnames into Internet Protocol (IP) addresses. DNS is known as the world's largest distributed database that manages hostnames and Internet Protocol. By having the DNS, only simple names that can be easily memorized will be used and then the domain name system will map it into the numeric Internet Protocol addresses that are used by computers to communicate. This research aims to propose a model for the development of a private cloud infrastructure to host DNS. The cloud infrastructure will be created using the OpenStack software platform where each server will be hosted separately in a different virtual machine. Virtual network architecture will be created using the Software Defined Networking (SDN) approach and it will be secured using Firewall as a Service (FWaaS). By hosting DNS in private cloud infrastructure, the DNS servers will be out of reach by attackers which will prevent DNS attacks. Besides, available research had proven that the cloud is the best choice for DNS. A prototype had been implemented and evaluated for its efficiencies. The findings from the evaluation carried out shown a positive result. 
 <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2106.04575v3-abstract-full').style.display = 'none'; document.getElementById('2106.04575v3-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 28 August, 2021; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 8 June, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> June 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">6 pages, 3 figures, and 2 tables</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2105.08123">arXiv:2105.08123</a> <span> [<a href="https://arxiv.org/pdf/2105.08123">pdf</a>, <a href="https://arxiv.org/format/2105.08123">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Hardware Architecture">cs.AR</span> </div> </div> <p class="title is-5 mathjax"> MetaSys: A Practical Open-Source Metadata Management System to Implement and Evaluate Cross-Layer Optimizations </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Vijaykumar%2C+N">Nandita Vijaykumar</a>, <a href="/search/cs?searchtype=author&query=Olgun%2C+A">Ataberk Olgun</a>, <a href="/search/cs?searchtype=author&query=Kanellopoulos%2C+K">Konstantinos Kanellopoulos</a>, <a href="/search/cs?searchtype=author&query=Bostanc%C4%B1%2C+N">Nisa Bostancı</a>, <a href="/search/cs?searchtype=author&query=Hassan%2C+H">Hasan Hassan</a>, <a href="/search/cs?searchtype=author&query=Lotfi%2C+M">Mehrshad Lotfi</a>, <a href="/search/cs?searchtype=author&query=Gibbons%2C+P+B">Phillip B. 
Gibbons</a>, <a href="/search/cs?searchtype=author&query=Mutlu%2C+O">Onur Mutlu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2105.08123v5-abstract-short" style="display: inline;"> This paper introduces the first open-source FPGA-based infrastructure, MetaSys, with a prototype in a RISC-V core, to enable the rapid implementation and evaluation of a wide range of cross-layer techniques in real hardware. Hardware-software cooperative techniques are powerful approaches to improve the performance, quality of service, and security of general-purpose processors. They are however t… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2105.08123v5-abstract-full').style.display = 'inline'; document.getElementById('2105.08123v5-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2105.08123v5-abstract-full" style="display: none;"> This paper introduces the first open-source FPGA-based infrastructure, MetaSys, with a prototype in a RISC-V core, to enable the rapid implementation and evaluation of a wide range of cross-layer techniques in real hardware. Hardware-software cooperative techniques are powerful approaches to improve the performance, quality of service, and security of general-purpose processors. They are however typically challenging to rapidly implement and evaluate in real hardware as they require full-stack changes to the hardware, OS, system software, and instruction-set architecture (ISA). MetaSys implements a rich hardware-software interface and lightweight metadata support that can be used as a common basis to rapidly implement and evaluate new cross-layer techniques. 
We demonstrate MetaSys's versatility and ease-of-use by implementing and evaluating three cross-layer techniques for: (i) prefetching for graph analytics; (ii) bounds checking in memory unsafe languages, and (iii) return address protection in stack frames; each technique only requiring ~100 lines of Chisel code over MetaSys. Using MetaSys, we perform the first detailed experimental study to quantify the performance overheads of using a single metadata management system to enable multiple cross-layer optimizations in CPUs. We identify the key sources of bottlenecks and system inefficiency of a general metadata management system. We design MetaSys to minimize these inefficiencies and provide increased versatility compared to previously-proposed metadata systems. Using three use cases and a detailed characterization, we demonstrate that a common metadata management system can be used to efficiently support diverse cross-layer techniques in CPUs. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2105.08123v5-abstract-full').style.display = 'none'; document.getElementById('2105.08123v5-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 21 January, 2023; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 17 May, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">A shorter version of this work is to appear at the ACM Transactions on Architecture and Code Optimization (TACO). 
27 pages, 15 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2104.06591">arXiv:2104.06591</a> <span> [<a href="https://arxiv.org/pdf/2104.06591">pdf</a>, <a href="https://arxiv.org/format/2104.06591">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Zero-Resource Multi-Dialectal Arabic Natural Language Understanding </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Khalifa%2C+M">Muhammad Khalifa</a>, <a href="/search/cs?searchtype=author&query=Hassan%2C+H">Hesham Hassan</a>, <a href="/search/cs?searchtype=author&query=Fahmy%2C+A">Aly Fahmy</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2104.06591v2-abstract-short" style="display: inline;"> A reasonable amount of annotated data is required for fine-tuning pre-trained language models (PLM) on downstream tasks. However, obtaining labeled examples for different language varieties can be costly. In this paper, we investigate the zero-shot performance on Dialectal Arabic (DA) when fine-tuning a PLM on modern standard Arabic (MSA) data only -- identifying a significant performance drop whe… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2104.06591v2-abstract-full').style.display = 'inline'; document.getElementById('2104.06591v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2104.06591v2-abstract-full" style="display: none;"> A reasonable amount of annotated data is required for fine-tuning pre-trained language models (PLM) on downstream tasks. 
However, obtaining labeled examples for different language varieties can be costly. In this paper, we investigate the zero-shot performance on Dialectal Arabic (DA) when fine-tuning a PLM on modern standard Arabic (MSA) data only -- identifying a significant performance drop when evaluating such models on DA. To remedy such performance drop, we propose self-training with unlabeled DA data and apply it in the context of named entity recognition (NER), part-of-speech (POS) tagging, and sarcasm detection (SRD) on several DA varieties. Our results demonstrate the effectiveness of self-training with unlabeled DA data: improving zero-shot MSA-to-DA transfer by as large as $\sim$10\% F$_1$ (NER), 2\% accuracy (POS tagging), and 4.5\% F$_1$ (SRD). We conduct an ablation experiment and show that the performance boost observed directly results from the unlabeled DA examples used for self-training. Our work opens up opportunities for leveraging the relatively abundant labeled MSA datasets to develop DA models for zero and low-resource dialects. We also report new state-of-the-art performance on all three tasks and open-source our fine-tuned models for the research community. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2104.06591v2-abstract-full').style.display = 'none'; document.getElementById('2104.06591v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 May, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 13 April, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2021. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">arXiv admin note: substantial text overlap with arXiv:2101.04758</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2102.05981">arXiv:2102.05981</a> <span> [<a href="https://arxiv.org/pdf/2102.05981">pdf</a>, <a href="https://arxiv.org/format/2102.05981">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Hardware Architecture">cs.AR</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/HPCA51647.2021.00037">10.1109/HPCA51647.2021.00037 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> BlockHammer: Preventing RowHammer at Low Cost by Blacklisting Rapidly-Accessed DRAM Rows </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Ya%C4%9Fl%C4%B1k%C3%A7%C4%B1%2C+A+G">Abdullah Giray Yağlıkçı</a>, <a href="/search/cs?searchtype=author&query=Patel%2C+M">Minesh Patel</a>, <a href="/search/cs?searchtype=author&query=Kim%2C+J+S">Jeremie S. 
Kim</a>, <a href="/search/cs?searchtype=author&query=Azizi%2C+R">Roknoddin Azizi</a>, <a href="/search/cs?searchtype=author&query=Olgun%2C+A">Ataberk Olgun</a>, <a href="/search/cs?searchtype=author&query=Orosa%2C+L">Lois Orosa</a>, <a href="/search/cs?searchtype=author&query=Hassan%2C+H">Hasan Hassan</a>, <a href="/search/cs?searchtype=author&query=Park%2C+J">Jisung Park</a>, <a href="/search/cs?searchtype=author&query=Kanellopoulos%2C+K">Konstantinos Kanellopoulos</a>, <a href="/search/cs?searchtype=author&query=Shahroodi%2C+T">Taha Shahroodi</a>, <a href="/search/cs?searchtype=author&query=Ghose%2C+S">Saugata Ghose</a>, <a href="/search/cs?searchtype=author&query=Mutlu%2C+O">Onur Mutlu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2102.05981v2-abstract-short" style="display: inline;"> Aggressive memory density scaling causes modern DRAM devices to suffer from RowHammer, a phenomenon where rapidly activating a DRAM row can cause bit-flips in physically-nearby rows. Recent studies demonstrate that modern DRAM chips, including chips previously marketed as RowHammer-safe, are even more vulnerable to RowHammer than older chips. Many works show that attackers can exploit RowHammer bi… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2102.05981v2-abstract-full').style.display = 'inline'; document.getElementById('2102.05981v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2102.05981v2-abstract-full" style="display: none;"> Aggressive memory density scaling causes modern DRAM devices to suffer from RowHammer, a phenomenon where rapidly activating a DRAM row can cause bit-flips in physically-nearby rows. 
Recent studies demonstrate that modern DRAM chips, including chips previously marketed as RowHammer-safe, are even more vulnerable to RowHammer than older chips. Many works show that attackers can exploit RowHammer bit-flips to reliably mount system-level attacks to escalate privilege and leak private data. Therefore, it is critical to ensure RowHammer-safe operation on all DRAM-based systems. Unfortunately, state-of-the-art RowHammer mitigation mechanisms face two major challenges. First, they incur increasingly higher performance and/or area overheads when applied to more vulnerable DRAM chips. Second, they require either proprietary information about or modifications to the DRAM chip design. In this paper, we show that it is possible to efficiently and scalably prevent RowHammer bit-flips without knowledge of or modification to DRAM internals. We introduce BlockHammer, a low-cost, effective, and easy-to-adopt RowHammer mitigation mechanism that overcomes the two key challenges by selectively throttling memory accesses that could otherwise cause RowHammer bit-flips. The key idea of BlockHammer is to (1) track row activation rates using area-efficient Bloom filters and (2) use the tracking data to ensure that no row is ever activated rapidly enough to induce RowHammer bit-flips. By doing so, BlockHammer (1) makes it impossible for a RowHammer bit-flip to occur and (2) greatly reduces a RowHammer attack's impact on the performance of co-running benign applications. Compared to state-of-the-art RowHammer mitigation mechanisms, BlockHammer provides competitive performance and energy when the system is not under a RowHammer attack and significantly better performance and energy when the system is under attack. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2102.05981v2-abstract-full').style.display = 'none'; document.getElementById('2102.05981v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 July, 2022; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 11 February, 2021; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2021. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">A shorter version of this work is to appear at the 27th IEEE International Symposium on High-Performance Computer Architecture (HPCA-27), 2021</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2009.07985">arXiv:2009.07985</a> <span> [<a href="https://arxiv.org/pdf/2009.07985">pdf</a>, <a href="https://arxiv.org/format/2009.07985">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Hardware Architecture">cs.AR</span> </div> </div> <p class="title is-5 mathjax"> Bit-Exact ECC Recovery (BEER): Determining DRAM On-Die ECC Functions by Exploiting DRAM Data Retention Characteristics </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Patel%2C+M">Minesh Patel</a>, <a href="/search/cs?searchtype=author&query=Kim%2C+J+S">Jeremie S. 
Kim</a>, <a href="/search/cs?searchtype=author&query=Shahroodi%2C+T">Taha Shahroodi</a>, <a href="/search/cs?searchtype=author&query=Hassan%2C+H">Hasan Hassan</a>, <a href="/search/cs?searchtype=author&query=Mutlu%2C+O">Onur Mutlu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2009.07985v1-abstract-short" style="display: inline;"> Increasing single-cell DRAM error rates have pushed DRAM manufacturers to adopt on-die error-correction coding (ECC), which operates entirely within a DRAM chip to improve factory yield. The on-die ECC function and its effects on DRAM reliability are considered trade secrets, so only the manufacturer knows precisely how on-die ECC alters the externally-visible reliability characteristics. Conseque… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2009.07985v1-abstract-full').style.display = 'inline'; document.getElementById('2009.07985v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2009.07985v1-abstract-full" style="display: none;"> Increasing single-cell DRAM error rates have pushed DRAM manufacturers to adopt on-die error-correction coding (ECC), which operates entirely within a DRAM chip to improve factory yield. The on-die ECC function and its effects on DRAM reliability are considered trade secrets, so only the manufacturer knows precisely how on-die ECC alters the externally-visible reliability characteristics. Consequently, on-die ECC obstructs third-party DRAM customers (e.g., test engineers, experimental researchers), who typically design, test, and validate systems based on these characteristics. 
To give third parties insight into precisely how on-die ECC transforms DRAM error patterns during error correction, we introduce Bit-Exact ECC Recovery (BEER), a new methodology for determining the full DRAM on-die ECC function (i.e., its parity-check matrix) without hardware tools, prerequisite knowledge about the DRAM chip or on-die ECC mechanism, or access to ECC metadata (e.g., error syndromes, parity information). BEER exploits the key insight that non-intrusively inducing data-retention errors with carefully-crafted test patterns reveals behavior that is unique to a specific ECC function. We use BEER to identify the ECC functions of 80 real LPDDR4 DRAM chips with on-die ECC from three major DRAM manufacturers. We evaluate BEER's correctness in simulation and performance on a real system to show that BEER is effective and practical across a wide range of on-die ECC functions. To demonstrate BEER's value, we propose and discuss several ways that third parties can use BEER to improve their design and testing practices. As a concrete example, we introduce and evaluate BEEP, the first error profiling methodology that uses the known on-die ECC function to recover the number and bit-exact locations of unobservable raw bit errors responsible for observable post-correction errors. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2009.07985v1-abstract-full').style.display = 'none'; document.getElementById('2009.07985v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 16 September, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2020. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">To appear in the MICRO 2020 conference proceedings</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2007.10987">arXiv:2007.10987</a> <span> [<a href="https://arxiv.org/pdf/2007.10987">pdf</a>, <a href="https://arxiv.org/format/2007.10987">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> </div> </div> <p class="title is-5 mathjax"> IBM Federated Learning: an Enterprise Framework White Paper V0.1 </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Ludwig%2C+H">Heiko Ludwig</a>, <a href="/search/cs?searchtype=author&query=Baracaldo%2C+N">Nathalie Baracaldo</a>, <a href="/search/cs?searchtype=author&query=Thomas%2C+G">Gegi Thomas</a>, <a href="/search/cs?searchtype=author&query=Zhou%2C+Y">Yi Zhou</a>, <a href="/search/cs?searchtype=author&query=Anwar%2C+A">Ali Anwar</a>, <a href="/search/cs?searchtype=author&query=Rajamoni%2C+S">Shashank Rajamoni</a>, <a href="/search/cs?searchtype=author&query=Ong%2C+Y">Yuya Ong</a>, <a href="/search/cs?searchtype=author&query=Radhakrishnan%2C+J">Jayaram Radhakrishnan</a>, <a href="/search/cs?searchtype=author&query=Verma%2C+A">Ashish Verma</a>, <a href="/search/cs?searchtype=author&query=Sinn%2C+M">Mathieu Sinn</a>, <a href="/search/cs?searchtype=author&query=Purcell%2C+M">Mark Purcell</a>, <a href="/search/cs?searchtype=author&query=Rawat%2C+A">Ambrish Rawat</a>, <a 
href="/search/cs?searchtype=author&query=Minh%2C+T">Tran Minh</a>, <a href="/search/cs?searchtype=author&query=Holohan%2C+N">Naoise Holohan</a>, <a href="/search/cs?searchtype=author&query=Chakraborty%2C+S">Supriyo Chakraborty</a>, <a href="/search/cs?searchtype=author&query=Whitherspoon%2C+S">Shalisha Whitherspoon</a>, <a href="/search/cs?searchtype=author&query=Steuer%2C+D">Dean Steuer</a>, <a href="/search/cs?searchtype=author&query=Wynter%2C+L">Laura Wynter</a>, <a href="/search/cs?searchtype=author&query=Hassan%2C+H">Hifaz Hassan</a>, <a href="/search/cs?searchtype=author&query=Laguna%2C+S">Sean Laguna</a>, <a href="/search/cs?searchtype=author&query=Yurochkin%2C+M">Mikhail Yurochkin</a>, <a href="/search/cs?searchtype=author&query=Agarwal%2C+M">Mayank Agarwal</a>, <a href="/search/cs?searchtype=author&query=Chuba%2C+E">Ebube Chuba</a>, <a href="/search/cs?searchtype=author&query=Abay%2C+A">Annie Abay</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2007.10987v1-abstract-short" style="display: inline;"> Federated Learning (FL) is an approach to conduct machine learning without centralizing training data in a single place, for reasons of privacy, confidentiality or data volume. However, solving federated machine learning problems raises issues above and beyond those of centralized machine learning. 
These issues include setting up communication infrastructure between parties, coordinating the learn… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2007.10987v1-abstract-full').style.display = 'inline'; document.getElementById('2007.10987v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2007.10987v1-abstract-full" style="display: none;"> Federated Learning (FL) is an approach to conduct machine learning without centralizing training data in a single place, for reasons of privacy, confidentiality or data volume. However, solving federated machine learning problems raises issues above and beyond those of centralized machine learning. These issues include setting up communication infrastructure between parties, coordinating the learning process, integrating party results, understanding the characteristics of the training data sets of different participating parties, handling data heterogeneity, and operating with the absence of a verification data set. IBM Federated Learning provides infrastructure and coordination for federated learning. Data scientists can design and run federated learning jobs based on existing, centralized machine learning models and can provide high-level instructions on how to run the federation. The framework applies to both Deep Neural Networks as well as ``traditional'' approaches for the most common machine learning libraries. {\proj} enables data scientists to expand their scope from centralized to federated machine learning, minimizing the learning curve at the outset while also providing the flexibility to deploy to different compute environments and design custom fusion algorithms. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2007.10987v1-abstract-full').style.display = 'none'; document.getElementById('2007.10987v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 July, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">17 pages</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> I.2.6; I.2.11 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2005.13121">arXiv:2005.13121</a> <span> [<a href="https://arxiv.org/pdf/2005.13121">pdf</a>, <a href="https://arxiv.org/format/2005.13121">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Hardware Architecture">cs.AR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> </div> </div> <p class="title is-5 mathjax"> Revisiting RowHammer: An Experimental Analysis of Modern DRAM Devices and Mitigation Techniques </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Kim%2C+J+S">Jeremie S. Kim</a>, <a href="/search/cs?searchtype=author&query=Patel%2C+M">Minesh Patel</a>, <a href="/search/cs?searchtype=author&query=Yaglikci%2C+A+G">A. 
Giray Yaglikci</a>, <a href="/search/cs?searchtype=author&query=Hassan%2C+H">Hasan Hassan</a>, <a href="/search/cs?searchtype=author&query=Azizi%2C+R">Roknoddin Azizi</a>, <a href="/search/cs?searchtype=author&query=Orosa%2C+L">Lois Orosa</a>, <a href="/search/cs?searchtype=author&query=Mutlu%2C+O">Onur Mutlu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2005.13121v2-abstract-short" style="display: inline;"> In order to shed more light on how RowHammer affects modern and future devices at the circuit-level, we first present an experimental characterization of RowHammer on 1580 DRAM chips (408x DDR3, 652x DDR4, and 520x LPDDR4) from 300 DRAM modules (60x DDR3, 110x DDR4, and 130x LPDDR4) with RowHammer protection mechanisms disabled, spanning multiple different technology nodes from across each of the… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2005.13121v2-abstract-full').style.display = 'inline'; document.getElementById('2005.13121v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2005.13121v2-abstract-full" style="display: none;"> In order to shed more light on how RowHammer affects modern and future devices at the circuit-level, we first present an experimental characterization of RowHammer on 1580 DRAM chips (408x DDR3, 652x DDR4, and 520x LPDDR4) from 300 DRAM modules (60x DDR3, 110x DDR4, and 130x LPDDR4) with RowHammer protection mechanisms disabled, spanning multiple different technology nodes from across each of the three major DRAM manufacturers. Our studies definitively show that newer DRAM chips are more vulnerable to RowHammer: as device feature size reduces, the number of activations needed to induce a RowHammer bit flip also reduces, to as few as 9.6k (4.8k to two rows each) in the most vulnerable chip we tested. 
We evaluate five state-of-the-art RowHammer mitigation mechanisms using cycle-accurate simulation in the context of real data taken from our chips to study how the mitigation mechanisms scale with chip vulnerability. We find that existing mechanisms either are not scalable or suffer from prohibitively large performance overheads in projected future devices given our observed trends of RowHammer vulnerability. Thus, it is critical to research more effective solutions to RowHammer. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2005.13121v2-abstract-full').style.display = 'none'; document.getElementById('2005.13121v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 May, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 26 May, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2020. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2005.12775">arXiv:2005.12775</a> <span> [<a href="https://arxiv.org/pdf/2005.12775">pdf</a>, <a href="https://arxiv.org/format/2005.12775">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Hardware Architecture">cs.AR</span> </div> </div> <p class="title is-5 mathjax"> CLR-DRAM: A Low-Cost DRAM Architecture Enabling Dynamic Capacity-Latency Trade-Off </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Luo%2C+H">Haocong Luo</a>, <a href="/search/cs?searchtype=author&query=Shahroodi%2C+T">Taha Shahroodi</a>, <a href="/search/cs?searchtype=author&query=Hassan%2C+H">Hasan Hassan</a>, <a href="/search/cs?searchtype=author&query=Patel%2C+M">Minesh Patel</a>, <a href="/search/cs?searchtype=author&query=Yaglikci%2C+A+G">Abdullah Giray Yaglikci</a>, <a href="/search/cs?searchtype=author&query=Orosa%2C+L">Lois Orosa</a>, <a href="/search/cs?searchtype=author&query=Park%2C+J">Jisung Park</a>, <a href="/search/cs?searchtype=author&query=Mutlu%2C+O">Onur Mutlu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2005.12775v1-abstract-short" style="display: inline;"> DRAM is the prevalent main memory technology, but its long access latency can limit the performance of many workloads. Although prior works provide DRAM designs that reduce DRAM access latency, their reduced storage capacities hinder the performance of workloads that need large memory capacity. 
Because the capacity-latency trade-off is fixed at design time, previous works cannot achieve maximum pe… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2005.12775v1-abstract-full').style.display = 'inline'; document.getElementById('2005.12775v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2005.12775v1-abstract-full" style="display: none;"> DRAM is the prevalent main memory technology, but its long access latency can limit the performance of many workloads. Although prior works provide DRAM designs that reduce DRAM access latency, their reduced storage capacities hinder the performance of workloads that need large memory capacity. Because the capacity-latency trade-off is fixed at design time, previous works cannot achieve maximum performance under very different and dynamic workload demands. This paper proposes Capacity-Latency-Reconfigurable DRAM (CLR-DRAM), a new DRAM architecture that enables dynamic capacity-latency trade-off at low cost. CLR-DRAM allows dynamic reconfiguration of any DRAM row to switch between two operating modes: 1) max-capacity mode, where every DRAM cell operates individually to achieve approximately the same storage density as a density-optimized commodity DRAM chip and 2) high-performance mode, where two adjacent DRAM cells in a DRAM row and their sense amplifiers are coupled to operate as a single low-latency logical cell driven by a single logical sense amplifier. We implement CLR-DRAM by adding isolation transistors in each DRAM subarray. Our evaluations show that CLR-DRAM can improve system performance and DRAM energy consumption by 18.6% and 29.7% on average with four-core multiprogrammed workloads. We believe that CLR-DRAM opens new research directions for a system to adapt to the diverse and dynamically changing memory capacity and access latency demands of workloads. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2005.12775v1-abstract-full').style.display = 'none'; document.getElementById('2005.12775v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 26 May, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">This work is to appear at ISCA 2020</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2005.01589">arXiv:2005.01589</a> <span> [<a href="https://arxiv.org/pdf/2005.01589">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.5121/ijcnc.2020.12202">10.5121/ijcnc.2020.12202 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> An Adaptive Diffserv Approach To Support QoS In Network Mobility Nemo Environment </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Hussein%2C+L+F">Loay F. 
Hussein</a>, <a href="/search/cs?searchtype=author&query=Hashim%2C+A+A">Aisha-Hassan Abdalla Hashim</a>, <a href="/search/cs?searchtype=author&query=Habaebi%2C+M+H">Mohamed Hadi Habaebi</a>, <a href="/search/cs?searchtype=author&query=Hassan%2C+W+H">Wan Haslina Hassan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2005.01589v1-abstract-short" style="display: inline;"> Network mobility basic support (NEMO BS) protocol (RFC 3963) is an extension of Mobile IPv6. The NEMO BS embraced by the IETF working group to permit any node in the portable network to be accessible to the Internet despite the fact the network itself is roaming. This protocol likewise Mobile IPv6 does not deliver any kind of Quality of Service (QoS) guarantees to its clients. It can barely offer… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2005.01589v1-abstract-full').style.display = 'inline'; document.getElementById('2005.01589v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2005.01589v1-abstract-full" style="display: none;"> Network mobility basic support (NEMO BS) protocol (RFC 3963) is an extension of Mobile IPv6. The NEMO BS embraced by the IETF working group to permit any node in the portable network to be accessible to the Internet despite the fact the network itself is roaming. This protocol likewise Mobile IPv6 does not deliver any kind of Quality of Service (QoS) guarantees to its clients. It can barely offer the same level of services (i.e. Best-Effort) to all the users without obligation to the needs of applications. These propositions a challenge to real-time applications that demand a precise level of QoS pledge. 
The Differentiated Services have recently come to be the most widely used QoS support technology in IP networks due to its relative simplicity and scalability benefits. This paper proposes a new scheme to provide QoS to mobile network nodes within the NEMO context. The proposed scheme intends to reduce handover latency for the users of MNN as well as alleviates packet losses. The feasibility of the proposed enhancement is assessed by measuring its performance against the native NEMO BS standard protocol using the NS-2 simulator. The obtained results in the simulation study have demonstrated that the proposed scheme outperforms the standard NEMO BS protocol. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2005.01589v1-abstract-full').style.display = 'none'; document.getElementById('2005.01589v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 4 May, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2020. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">26 pages, 16 figures</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2004.02745">arXiv:2004.02745</a> <span> [<a href="https://arxiv.org/pdf/2004.02745">pdf</a>, <a href="https://arxiv.org/format/2004.02745">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Meta-Learning for Few-Shot NMT Adaptation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Sharaf%2C+A">Amr Sharaf</a>, <a href="/search/cs?searchtype=author&query=Hassan%2C+H">Hany Hassan</a>, <a href="/search/cs?searchtype=author&query=Daum%C3%A9%2C+H">Hal Daumé III</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2004.02745v1-abstract-short" style="display: inline;"> We present META-MT, a meta-learning approach to adapt Neural Machine Translation (NMT) systems in a few-shot setting. META-MT provides a new approach to make NMT models easily adaptable to many target domains with the minimal amount of in-domain data. 
We frame the adaptation of NMT systems as a meta-learning problem, where we learn to adapt to new unseen domains based on simulated offline meta-tra… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2004.02745v1-abstract-full').style.display = 'inline'; document.getElementById('2004.02745v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2004.02745v1-abstract-full" style="display: none;"> We present META-MT, a meta-learning approach to adapt Neural Machine Translation (NMT) systems in a few-shot setting. META-MT provides a new approach to make NMT models easily adaptable to many target domains with the minimal amount of in-domain data. We frame the adaptation of NMT systems as a meta-learning problem, where we learn to adapt to new unseen domains based on simulated offline meta-training domain adaptation tasks. We evaluate the proposed meta-learning strategy on ten domains with general large scale NMT systems. We show that META-MT significantly outperforms classical domain adaptation when very few in-domain examples are available. Our experiments shows that META-MT can outperform classical fine-tuning by up to 2.5 BLEU points after seeing only 4, 000 translated words (300 parallel sentences). <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2004.02745v1-abstract-full').style.display = 'none'; document.getElementById('2004.02745v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 6 April, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2020. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/2004.01807">arXiv:2004.01807</a> <span> [<a href="https://arxiv.org/pdf/2004.01807">pdf</a>, <a href="https://arxiv.org/format/2004.01807">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> </div> </div> <p class="title is-5 mathjax"> TRRespass: Exploiting the Many Sides of Target Row Refresh </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Frigo%2C+P">Pietro Frigo</a>, <a href="/search/cs?searchtype=author&query=Vannacci%2C+E">Emanuele Vannacci</a>, <a href="/search/cs?searchtype=author&query=Hassan%2C+H">Hasan Hassan</a>, <a href="/search/cs?searchtype=author&query=van+der+Veen%2C+V">Victor van der Veen</a>, <a href="/search/cs?searchtype=author&query=Mutlu%2C+O">Onur Mutlu</a>, <a href="/search/cs?searchtype=author&query=Giuffrida%2C+C">Cristiano Giuffrida</a>, <a href="/search/cs?searchtype=author&query=Bos%2C+H">Herbert Bos</a>, <a href="/search/cs?searchtype=author&query=Razavi%2C+K">Kaveh Razavi</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="2004.01807v1-abstract-short" style="display: inline;"> After a plethora of high-profile RowHammer attacks, CPU and DRAM vendors scrambled to deliver what was meant to be the definitive hardware solution against the RowHammer problem: Target Row Refresh (TRR). A common belief among practitioners is that, for the latest generation of DDR4 systems that are protected by TRR, RowHammer is no longer an issue in practice. 
However, in reality, very little is… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2004.01807v1-abstract-full').style.display = 'inline'; document.getElementById('2004.01807v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="2004.01807v1-abstract-full" style="display: none;"> After a plethora of high-profile RowHammer attacks, CPU and DRAM vendors scrambled to deliver what was meant to be the definitive hardware solution against the RowHammer problem: Target Row Refresh (TRR). A common belief among practitioners is that, for the latest generation of DDR4 systems that are protected by TRR, RowHammer is no longer an issue in practice. However, in reality, very little is known about TRR. In this paper, we demystify the inner workings of TRR and debunk its security guarantees. We show that what is advertised as a single mitigation mechanism is actually a series of different solutions coalesced under the umbrella term TRR. We inspect and disclose, via a deep analysis, different existing TRR solutions and demonstrate that modern implementations operate entirely inside DRAM chips. Despite the difficulties of analyzing in-DRAM mitigations, we describe novel techniques for gaining insights into the operation of these mitigation mechanisms. These insights allow us to build TRRespass, a scalable black-box RowHammer fuzzer. TRRespass shows that even the latest generation DDR4 chips with in-DRAM TRR, immune to all known RowHammer attacks, are often still vulnerable to new TRR-aware variants of RowHammer that we develop. In particular, TRRespass finds that, on modern DDR4 modules, RowHammer is still possible when many aggressor rows are used (as many as 19 in some cases), with a method we generally refer to as Many-sided RowHammer. 
Overall, our analysis shows that 13 out of the 42 modules from all three major DRAM vendors are vulnerable to our TRR-aware RowHammer access patterns, and thus one can still mount existing state-of-the-art RowHammer attacks. In addition to DDR4, we also experiment with LPDDR4 chips and show that they are susceptible to RowHammer bit flips too. Our results provide concrete evidence that the pursuit of better RowHammer mitigations must continue. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('2004.01807v1-abstract-full').style.display = 'none'; document.getElementById('2004.01807v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 April, 2020; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> April 2020. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">16 pages, 16 figures, in proceedings IEEE S&P 2020</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> B.8.1 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1910.06672">arXiv:1910.06672</a> <span> [<a href="https://arxiv.org/pdf/1910.06672">pdf</a>, <a href="https://arxiv.org/format/1910.06672">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Hardware Architecture">cs.AR</span> </div> </div> <p class="title is-5 mathjax"> Refresh Triggered Computation: Improving the Energy Efficiency of Convolutional Neural Network Accelerators </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Jafri%2C+S+M+A+H">Syed M. A. H. 
Jafri</a>, <a href="/search/cs?searchtype=author&query=Hassan%2C+H">Hasan Hassan</a>, <a href="/search/cs?searchtype=author&query=Hemani%2C+A">Ahmed Hemani</a>, <a href="/search/cs?searchtype=author&query=Mutlu%2C+O">Onur Mutlu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1910.06672v2-abstract-short" style="display: inline;"> To employ a Convolutional Neural Network (CNN) in an energy-constrained embedded system, it is critical for the CNN implementation to be highly energy efficient. Many recent studies propose CNN accelerator architectures with custom computation units that try to improve energy-efficiency and performance of CNNs by minimizing data transfers from DRAM-based main memory. However, in these architecture… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1910.06672v2-abstract-full').style.display = 'inline'; document.getElementById('1910.06672v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1910.06672v2-abstract-full" style="display: none;"> To employ a Convolutional Neural Network (CNN) in an energy-constrained embedded system, it is critical for the CNN implementation to be highly energy efficient. Many recent studies propose CNN accelerator architectures with custom computation units that try to improve energy-efficiency and performance of CNNs by minimizing data transfers from DRAM-based main memory. However, in these architectures, DRAM is still responsible for half of the overall energy consumption of the system, on average. A key factor of the high energy consumption of DRAM is the refresh overhead, which is estimated to consume 40% of the total DRAM energy. 
In this paper, we propose a new mechanism, Refresh Triggered Computation (RTC), that exploits the memory access patterns of CNN applications to reduce the number of refresh operations. We propose three RTC designs (min-RTC, mid-RTC, and full-RTC), each of which requires a different level of aggressiveness in terms of customization to the DRAM subsystem. All of our designs have small overhead. Even the most aggressive RTC design (i.e., full-RTC) imposes an area overhead of only 0.18% in a 16 Gb DRAM chip and can have less overhead for denser chips. Our experimental evaluation on six well-known CNNs show that RTC reduces average DRAM energy consumption by 24.4% and 61.3%, for the least aggressive and the most aggressive RTC implementations, respectively. Besides CNNs, we also evaluate our RTC mechanism on three workloads from other domains. We show that RTC saves 31.9% and 16.9% DRAM energy for Face Recognition and Bayesian Confidence Propagation Neural Network (BCPNN), respectively. We believe RTC can be applied to other applications whose memory access patterns remain predictable for a sufficiently long time. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1910.06672v2-abstract-full').style.display = 'none'; document.getElementById('1910.06672v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 7 October, 2020; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 15 October, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2019. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1902.07344">arXiv:1902.07344</a> <span> [<a href="https://arxiv.org/pdf/1902.07344">pdf</a>, <a href="https://arxiv.org/format/1902.07344">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> </div> </div> <p class="title is-5 mathjax"> Dataplant: Enhancing System Security with Low-Cost In-DRAM Value Generation Primitives </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Orosa%2C+L">Lois Orosa</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+Y">Yaohua Wang</a>, <a href="/search/cs?searchtype=author&query=Puddu%2C+I">Ivan Puddu</a>, <a href="/search/cs?searchtype=author&query=Sadrosadati%2C+M">Mohammad Sadrosadati</a>, <a href="/search/cs?searchtype=author&query=Razavi%2C+K">Kaveh Razavi</a>, <a href="/search/cs?searchtype=author&query=G%C3%B3mez-Luna%2C+J">Juan Gómez-Luna</a>, <a href="/search/cs?searchtype=author&query=Hassan%2C+H">Hasan Hassan</a>, <a href="/search/cs?searchtype=author&query=Mansouri-Ghiasi%2C+N">Nika Mansouri-Ghiasi</a>, <a href="/search/cs?searchtype=author&query=Tavakkol%2C+A">Arash Tavakkol</a>, <a href="/search/cs?searchtype=author&query=Patel%2C+M">Minesh Patel</a>, <a href="/search/cs?searchtype=author&query=Kim%2C+J">Jeremie Kim</a>, <a href="/search/cs?searchtype=author&query=Seshadri%2C+V">Vivek Seshadri</a>, <a href="/search/cs?searchtype=author&query=Kang%2C+U">Uksong Kang</a>, <a href="/search/cs?searchtype=author&query=Ghose%2C+S">Saugata Ghose</a>, <a href="/search/cs?searchtype=author&query=Azevedo%2C+R">Rodolfo Azevedo</a>, <a href="/search/cs?searchtype=author&query=Mutlu%2C+O">Onur Mutlu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span 
class="abstract-short has-text-grey-dark mathjax" id="1902.07344v2-abstract-short" style="display: inline;"> DRAM manufacturers have been prioritizing memory capacity, yield, and bandwidth for years, while trying to keep the design complexity as simple as possible. DRAM chips do not carry out any computation or other important functions, such as security. Processors implement most of the existing security mechanisms that protect the system against security threats, because 1) executing security mechanism… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1902.07344v2-abstract-full').style.display = 'inline'; document.getElementById('1902.07344v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1902.07344v2-abstract-full" style="display: none;"> DRAM manufacturers have been prioritizing memory capacity, yield, and bandwidth for years, while trying to keep the design complexity as simple as possible. DRAM chips do not carry out any computation or other important functions, such as security. Processors implement most of the existing security mechanisms that protect the system against security threats, because 1) executing security mechanisms usually require non-trivial computational capabilities (e.g., encryption), and 2) commodity DRAM chips are not designed to perform computations or tasks other than data storage. In this work, we advocate for DRAM as a key component for providing security mechanisms to the system. To this end, we propose Dataplant, a new class of low-cost, high-performance, and reliable security primitives that can be integrated in commodity DRAM chips with minimal changes. The main idea of Dataplant is to slightly modify the internal DRAM timing signals to expose the inherent process variation found in all DRAM chips for generating unpredictable but reproducible values (e.g., keys) within DRAM. 
We use Dataplant to build two new security mechanisms. First, a new Dataplant-based physical unclonable function (PUF) with non-destructive read-out, low evaluation latency, robust responses, resiliency to temperature changes, and data-independent responses. Second, a new cold boot attack prevention mechanism that automatically destroys all data within DRAM on every power cycle with zero run-time energy and latency overheads. Using a combination of detailed simulations and experiments with 136 real commodity DRAM chips, we show that our Dataplant-based PUF has 1.8x higher throughput than the best state-of-the-art DRAM PUFs. We also demonstrate that our Dataplant-based cold boot attack protection mechanism is 19.5x faster and consumes 2.54x less energy when compared to existing mechanisms. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1902.07344v2-abstract-full').style.display = 'none'; document.getElementById('1902.07344v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 November, 2019; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 19 February, 2019; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> February 2019. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1810.09929">arXiv:1810.09929</a> <span> [<a href="https://arxiv.org/pdf/1810.09929">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Signal Processing">eess.SP</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Robotics">cs.RO</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Systems and Control">eess.SY</span> </div> </div> <p class="title is-5 mathjax"> Teleoperated Robotic Arm Movement Using EMG Signal With Wearable MYO Armband </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Hassan%2C+H+F">Hussein F. Hassan</a>, <a href="/search/cs?searchtype=author&query=Abou-Loukh%2C+S+J">Sadiq J. Abou-Loukh</a>, <a href="/search/cs?searchtype=author&query=Ibraheem%2C+I+K">Ibraheem Kasim Ibraheem</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1810.09929v1-abstract-short" style="display: inline;"> The main purpose of this research is to move the robotic arm (5DoF) in real-time, based on the surface Electromyography (sEMG) signals, as obtained from the wireless Myo gesture armband to distinguish seven hand movements. 
The sEMG signals are biomedical signals that estimate and record the electrical signals produced in muscles through their contraction and relaxation, representing neuromuscular… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1810.09929v1-abstract-full').style.display = 'inline'; document.getElementById('1810.09929v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1810.09929v1-abstract-full" style="display: none;"> The main purpose of this research is to move the robotic arm (5DoF) in real-time, based on the surface Electromyography (sEMG) signals, as obtained from the wireless Myo gesture armband to distinguish seven hand movements. The sEMG signals are biomedical signals that estimate and record the electrical signals produced in muscles through their contraction and relaxation, representing neuromuscular activities. Therefore, controlling the robotic arm via the muscles of the human arm using sEMG signals is considered to be one of the most significant methods. The wireless Myo gesture armband is used to record sEMG signals from the forearm. In order to analyze these signals, the pattern recognition system is employed, which consists of three main parts: segmentation, feature extraction, and classification. Overlap technique is chosen for segmenting part of the signal. Six time domain features (MAV, WL, RMS, AR, ZC, and SSC) are extracted from each segment. The classifiers (SVM, LDA, and KNN) are employed to enable comparison between them in order to obtain optimum accuracy of the system. The results show that the SVM achieves higher system accuracy at 96.57 %, compared to LDA reaching 96.01 %, and 92.67 % accuracy achieved by KNN. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1810.09929v1-abstract-full').style.display = 'none'; document.getElementById('1810.09929v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 3 October, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2018. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1810.09360">arXiv:1810.09360</a> <span> [<a href="https://arxiv.org/pdf/1810.09360">pdf</a>, <a href="https://arxiv.org/format/1810.09360">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Distributed, Parallel, and Cluster Computing">cs.DC</span> </div> </div> <p class="title is-5 mathjax"> Enabling Efficient RDMA-based Synchronous Mirroring of Persistent Memory Transactions </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Tavakkol%2C+A">Arash Tavakkol</a>, <a href="/search/cs?searchtype=author&query=Kolli%2C+A">Aasheesh Kolli</a>, <a href="/search/cs?searchtype=author&query=Novakovic%2C+S">Stanko Novakovic</a>, <a href="/search/cs?searchtype=author&query=Razavi%2C+K">Kaveh Razavi</a>, <a href="/search/cs?searchtype=author&query=Gomez-Luna%2C+J">Juan Gomez-Luna</a>, <a href="/search/cs?searchtype=author&query=Hassan%2C+H">Hasan Hassan</a>, <a href="/search/cs?searchtype=author&query=Barthels%2C+C">Claude Barthels</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+Y">Yaohua Wang</a>, <a href="/search/cs?searchtype=author&query=Sadrosadati%2C+M">Mohammad Sadrosadati</a>, <a href="/search/cs?searchtype=author&query=Ghose%2C+S">Saugata Ghose</a>, <a href="/search/cs?searchtype=author&query=Singla%2C+A">Ankit Singla</a>, <a 
href="/search/cs?searchtype=author&query=Subrahmanyam%2C+P">Pratap Subrahmanyam</a>, <a href="/search/cs?searchtype=author&query=Mutlu%2C+O">Onur Mutlu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1810.09360v1-abstract-short" style="display: inline;"> Synchronous Mirroring (SM) is a standard approach to building highly-available and fault-tolerant enterprise storage systems. SM ensures strong data consistency by maintaining multiple exact data replicas and synchronously propagating every update to all of them. Such strong consistency provides fault tolerance guarantees and a simple programming model coveted by enterprise system designers. For c… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1810.09360v1-abstract-full').style.display = 'inline'; document.getElementById('1810.09360v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1810.09360v1-abstract-full" style="display: none;"> Synchronous Mirroring (SM) is a standard approach to building highly-available and fault-tolerant enterprise storage systems. SM ensures strong data consistency by maintaining multiple exact data replicas and synchronously propagating every update to all of them. Such strong consistency provides fault tolerance guarantees and a simple programming model coveted by enterprise system designers. For current storage devices, SM comes at modest performance overheads. This is because performing both local and remote updates simultaneously is only marginally slower than performing just local updates, due to the relatively slow performance of accesses to storage in today's systems. 
However, emerging persistent memory and ultra-low-latency network technologies necessitate a careful re-evaluation of the existing SM techniques, as these technologies present fundamentally different latency characteristics compared than their traditional counterparts. In addition to that, existing low-latency network technologies, such as Remote Direct Memory Access (RDMA), provide limited ordering guarantees and do not provide durability guarantees necessary for SM. To evaluate the performance implications of RDMA-based SM, we develop a rigorous testing framework that is based on emulated persistent memory. Our testing framework makes use of two different tools: (i) a configurable microbenchmark and (ii) a modified version of the WHISPER benchmark suite, which comprises a set of common cloud applications. Using this framework, we find that recently proposed RDMA primitives, such as remote commit, provide correctness guarantees, but do not take full advantage of the asynchronous nature of RDMA hardware. To this end, we propose new primitives enabling efficient and correct SM over RDMA, and use these primitives to develop two new techniques delivering high-performance SM of persistent memories. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1810.09360v1-abstract-full').style.display = 'none'; document.getElementById('1810.09360v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 22 October, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2018. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1810.03552">arXiv:1810.03552</a> <span> [<a href="https://arxiv.org/pdf/1810.03552">pdf</a>, <a href="https://arxiv.org/format/1810.03552">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Machine Learning">cs.LG</span> </div> </div> <p class="title is-5 mathjax"> Multi-Source Cross-Lingual Model Transfer: Learning What to Share </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Chen%2C+X">Xilun Chen</a>, <a href="/search/cs?searchtype=author&query=Awadallah%2C+A+H">Ahmed Hassan Awadallah</a>, <a href="/search/cs?searchtype=author&query=Hassan%2C+H">Hany Hassan</a>, <a href="/search/cs?searchtype=author&query=Wang%2C+W">Wei Wang</a>, <a href="/search/cs?searchtype=author&query=Cardie%2C+C">Claire Cardie</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1810.03552v3-abstract-short" style="display: inline;"> Modern NLP applications have enjoyed a great boost utilizing neural networks models. Such deep neural models, however, are not applicable to most human languages due to the lack of annotated training data for various NLP tasks. 
Cross-lingual transfer learning (CLTL) is a viable method for building NLP models for a low-resource target language by leveraging labeled data from other (source) language… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1810.03552v3-abstract-full').style.display = 'inline'; document.getElementById('1810.03552v3-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1810.03552v3-abstract-full" style="display: none;"> Modern NLP applications have enjoyed a great boost utilizing neural networks models. Such deep neural models, however, are not applicable to most human languages due to the lack of annotated training data for various NLP tasks. Cross-lingual transfer learning (CLTL) is a viable method for building NLP models for a low-resource target language by leveraging labeled data from other (source) languages. In this work, we focus on the multilingual transfer setting where training data in multiple source languages is leveraged to further boost target language performance. Unlike most existing methods that rely only on language-invariant features for CLTL, our approach coherently utilizes both language-invariant and language-specific features at instance level. Our model leverages adversarial networks to learn language-invariant features, and mixture-of-experts models to dynamically exploit the similarity between the target language and each individual source language. This enables our model to learn effectively what to share between various languages in the multilingual setup. Moreover, when coupled with unsupervised multilingual embeddings, our model can operate in a zero-resource setting where neither target language training data nor cross-lingual resources are available. 
Our model achieves significant performance gains over prior art, as shown in an extensive set of experiments over multiple text classification and sequence tagging tasks including a large-scale industry dataset. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1810.03552v3-abstract-full').style.display = 'none'; document.getElementById('1810.03552v3-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 5 June, 2019; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 8 October, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> October 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">ACL 2019</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1809.07858">arXiv:1809.07858</a> <span> [<a href="https://arxiv.org/pdf/1809.07858">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computational Engineering, Finance, and Science">cs.CE</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Data Structures and Algorithms">cs.DS</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Genomics">q-bio.GN</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1093/bioinformatics/btz234">10.1093/bioinformatics/btz234 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Shouji: A Fast and Efficient Pre-Alignment Filter for 
Sequence Alignment </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Alser%2C+M">Mohammed Alser</a>, <a href="/search/cs?searchtype=author&query=Hassan%2C+H">Hasan Hassan</a>, <a href="/search/cs?searchtype=author&query=Kumar%2C+A">Akash Kumar</a>, <a href="/search/cs?searchtype=author&query=Mutlu%2C+O">Onur Mutlu</a>, <a href="/search/cs?searchtype=author&query=Alkan%2C+C">Can Alkan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1809.07858v4-abstract-short" style="display: inline;"> Motivation: The ability to generate massive amounts of sequencing data continues to overwhelm the processing capability of existing algorithms and compute infrastructures. In this work, we explore the use of hardware/software co-design and hardware acceleration to significantly reduce the execution time of short sequence alignment, a crucial step in analyzing sequenced genomes. We introduce Shouji… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1809.07858v4-abstract-full').style.display = 'inline'; document.getElementById('1809.07858v4-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1809.07858v4-abstract-full" style="display: none;"> Motivation: The ability to generate massive amounts of sequencing data continues to overwhelm the processing capability of existing algorithms and compute infrastructures. In this work, we explore the use of hardware/software co-design and hardware acceleration to significantly reduce the execution time of short sequence alignment, a crucial step in analyzing sequenced genomes. We introduce Shouji, a highly-parallel and accurate pre-alignment filter that remarkably reduces the need for computationally-costly dynamic programming algorithms. 
The first key idea of our proposed pre-alignment filter is to provide high filtering accuracy by correctly detecting all common subsequences shared between two given sequences. The second key idea is to design a hardware accelerator that adopts modern FPGA (Field-Programmable Gate Array) architectures to further boost the performance of our algorithm. Results: Shouji significantly improves the accuracy of pre-alignment filtering by up to two orders of magnitude compared to the state-of-the-art pre-alignment filters, GateKeeper and SHD. Our FPGA-based accelerator is up to three orders of magnitude faster than the equivalent CPU implementation of Shouji. Using a single FPGA chip, we benchmark the benefits of integrating Shouji with five state-of-the-art sequence aligners, designed for different computing platforms. The addition of Shouji as a pre-alignment step reduces the execution time of the five state-of-the-art sequence aligners by up to 18.8x. Shouji can be adapted for any bioinformatics pipeline that performs sequence alignment for verification. Unlike most existing methods that aim to accelerate sequence alignment, Shouji does not sacrifice any of the aligner capabilities, as it does not modify or replace the alignment step. Availability: https://github.com/CMU-SAFARI/Shouji <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1809.07858v4-abstract-full').style.display = 'none'; document.getElementById('1809.07858v4-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 15 April, 2019; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 18 September, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> September 2018. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">https://academic.oup.com/bioinformatics/advance-article-abstract/doi/10.1093/bioinformatics/btz234/5421509, Bioinformatics Journal 2019</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> Bioinformatics, Nov 1; 35 (21): 4255 - 4263, 2019 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1808.04286">arXiv:1808.04286</a> <span> [<a href="https://arxiv.org/pdf/1808.04286">pdf</a>, <a href="https://arxiv.org/format/1808.04286">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Hardware Architecture">cs.AR</span> </div> </div> <p class="title is-5 mathjax"> D-RaNGe: Using Commodity DRAM Devices to Generate True Random Numbers with Low Latency and High Throughput </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Kim%2C+J+S">Jeremie S. Kim</a>, <a href="/search/cs?searchtype=author&query=Patel%2C+M">Minesh Patel</a>, <a href="/search/cs?searchtype=author&query=Hassan%2C+H">Hasan Hassan</a>, <a href="/search/cs?searchtype=author&query=Orosa%2C+L">Lois Orosa</a>, <a href="/search/cs?searchtype=author&query=Mutlu%2C+O">Onur Mutlu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1808.04286v2-abstract-short" style="display: inline;"> We propose a new DRAM-based true random number generator (TRNG) that leverages DRAM cells as an entropy source. The key idea is to intentionally violate the DRAM access timing parameters and use the resulting errors as the source of randomness. 
Our technique specifically decreases the DRAM row activation latency (timing parameter tRCD) below manufacturer-recommended specifications, to induce read… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1808.04286v2-abstract-full').style.display = 'inline'; document.getElementById('1808.04286v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1808.04286v2-abstract-full" style="display: none;"> We propose a new DRAM-based true random number generator (TRNG) that leverages DRAM cells as an entropy source. The key idea is to intentionally violate the DRAM access timing parameters and use the resulting errors as the source of randomness. Our technique specifically decreases the DRAM row activation latency (timing parameter tRCD) below manufacturer-recommended specifications, to induce read errors, or activation failures, that exhibit true random behavior. We then aggregate the resulting data from multiple cells to obtain a TRNG capable of providing a high throughput of random numbers at low latency. To demonstrate that our TRNG design is viable using commodity DRAM chips, we rigorously characterize the behavior of activation failures in 282 state-of-the-art LPDDR4 devices from three major DRAM manufacturers. We verify our observations using four additional DDR3 DRAM devices from the same manufacturers. Our results show that many cells in each device produce random data that remains robust over both time and temperature variation. We use our observations to develop D-RanGe, a methodology for extracting true random numbers from commodity DRAM devices with high throughput and low latency by deliberately violating the read access timing parameters. 
We evaluate the quality of our TRNG using the commonly-used NIST statistical test suite for randomness and find that D-RaNGe: 1) successfully passes each test, and 2) generates true random numbers with over two orders of magnitude higher throughput than the previous highest-throughput DRAM-based TRNG. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1808.04286v2-abstract-full').style.display = 'none'; document.getElementById('1808.04286v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 25 December, 2018; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 13 August, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> August 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">An earlier version was submitted to and reviewed by the International Symposium on Microarchitecture (51) 2018, with a submission deadline on April 6th, 2018</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1807.05102">arXiv:1807.05102</a> <span> [<a href="https://arxiv.org/pdf/1807.05102">pdf</a>, <a href="https://arxiv.org/format/1807.05102">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Hardware Architecture">cs.AR</span> </div> </div> <p class="title is-5 mathjax"> What Your DRAM Power Models Are Not Telling You: Lessons from a Detailed Experimental Study </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Ghose%2C+S">Saugata Ghose</a>, <a href="/search/cs?searchtype=author&query=Ya%C4%9Fl%C4%B1k%C3%A7%C4%B1%2C+A+G">Abdullah Giray Yağlıkçı</a>, 
<a href="/search/cs?searchtype=author&query=Gupta%2C+R">Raghav Gupta</a>, <a href="/search/cs?searchtype=author&query=Lee%2C+D">Donghyuk Lee</a>, <a href="/search/cs?searchtype=author&query=Kudrolli%2C+K">Kais Kudrolli</a>, <a href="/search/cs?searchtype=author&query=Liu%2C+W+X">William X. Liu</a>, <a href="/search/cs?searchtype=author&query=Hassan%2C+H">Hasan Hassan</a>, <a href="/search/cs?searchtype=author&query=Chang%2C+K+K">Kevin K. Chang</a>, <a href="/search/cs?searchtype=author&query=Chatterjee%2C+N">Niladrish Chatterjee</a>, <a href="/search/cs?searchtype=author&query=Agrawal%2C+A">Aditya Agrawal</a>, <a href="/search/cs?searchtype=author&query=O%27Connor%2C+M">Mike O'Connor</a>, <a href="/search/cs?searchtype=author&query=Mutlu%2C+O">Onur Mutlu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1807.05102v1-abstract-short" style="display: inline;"> Main memory (DRAM) consumes as much as half of the total system power in a computer today, resulting in a growing need to develop new DRAM architectures and systems that consume less power. Researchers have long relied on DRAM power models that are based off of standardized current measurements provided by vendors, called IDD values. Unfortunately, we find that these models are highly inaccurate,… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1807.05102v1-abstract-full').style.display = 'inline'; document.getElementById('1807.05102v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1807.05102v1-abstract-full" style="display: none;"> Main memory (DRAM) consumes as much as half of the total system power in a computer today, resulting in a growing need to develop new DRAM architectures and systems that consume less power. 
Researchers have long relied on DRAM power models that are based off of standardized current measurements provided by vendors, called IDD values. Unfortunately, we find that these models are highly inaccurate, and do not reflect the actual power consumed by real DRAM devices. We perform the first comprehensive experimental characterization of the power consumed by modern real-world DRAM modules. Our extensive characterization of 50 DDR3L DRAM modules from three major vendors yields four key new observations about DRAM power consumption: (1) across all IDD values that we measure, the current consumed by real DRAM modules varies significantly from the current specified by the vendors; (2) DRAM power consumption strongly depends on the data value that is read or written; (3) there is significant structural variation, where the same banks and rows across multiple DRAM modules from the same model consume more power than other banks or rows; and (4) over successive process technology generations, DRAM power consumption has not decreased by as much as vendor specifications have indicated. Based on our detailed analysis and characterization data, we develop the Variation-Aware model of Memory Power Informed by Real Experiments (VAMPIRE). We show that VAMPIRE has a mean absolute percentage error of only 6.8% compared to actual measured DRAM power. VAMPIRE enables a wide range of studies that were not possible using prior DRAM power models. As an example, we use VAMPIRE to evaluate a new power-aware data encoding mechanism, which can reduce DRAM energy consumption by an average of 12.2%. We plan to open-source both VAMPIRE and our extensive raw data collected during our experimental characterization. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1807.05102v1-abstract-full').style.display = 'none'; document.getElementById('1807.05102v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 13 July, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> July 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">presented at SIGMETRICS 2018</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1805.06691">arXiv:1805.06691</a> <span> [<a href="https://arxiv.org/pdf/1805.06691">pdf</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Cryptography and Security">cs.CR</span> <span class="tag is-small is-grey tooltip is-tooltip-top" data-tooltip="Networking and Internet Architecture">cs.NI</span> </div> <div class="is-inline-block" style="margin-left: 0.5rem"> <div class="tags has-addons"> <span class="tag is-dark is-size-7">doi</span> <span class="tag is-light is-size-7"><a class="" href="https://doi.org/10.1109/UkrMiCo.2017.8095429">10.1109/UkrMiCo.2017.8095429 <i class="fa fa-external-link" aria-hidden="true"></i></a></span> </div> </div> </div> <p class="title is-5 mathjax"> Test for penetration in Wi-Fi network: attacks on WPA2-PSK and WPA2-Enterprise </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Radivilova%2C+T">Tamara Radivilova</a>, <a href="/search/cs?searchtype=author&query=Hassan%2C+H+A">Hassan Ali Hassan</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short 
has-text-grey-dark mathjax" id="1805.06691v1-abstract-short" style="display: inline;"> In this work the wireless networks security algorithms were analyzed. The fundamentals of the WPA and WPA2 safety algorithms, their weaknesses and ways of attacking WPA and WPA2 Enterprise Wireless Networks are described. Successful attack on the WPA2-PSK and WPA2-Enterprise was carried out during the performance of work. The progress of this attack and its results were described. </span> <span class="abstract-full has-text-grey-dark mathjax" id="1805.06691v1-abstract-full" style="display: none;"> In this work the wireless networks security algorithms were analyzed. The fundamentals of the WPA and WPA2 safety algorithms, their weaknesses and ways of attacking WPA and WPA2 Enterprise Wireless Networks are described. Successful attack on the WPA2-PSK and WPA2-Enterprise was carried out during the performance of work. The progress of this attack and its results were described. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1805.06691v1-abstract-full').style.display = 'none'; document.getElementById('1805.06691v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 17 May, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2018. </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">4 pages</span> </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">ACM Class:</span> D.4.6; E.3; C.2 </p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Journal ref:</span> T. Radivilova and H. A. Hassan, "Test for penetration in Wi-Fi network: Attacks on WPA2-PSK and WPA2-enterprise," (UkrMiCo), Odessa, 2017, pp. 
1-4 </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1805.03969">arXiv:1805.03969</a> <span> [<a href="https://arxiv.org/pdf/1805.03969">pdf</a>, <a href="https://arxiv.org/format/1805.03969">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Hardware Architecture">cs.AR</span> </div> </div> <p class="title is-5 mathjax"> Exploiting Row-Level Temporal Locality in DRAM to Reduce the Memory Access Latency </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Hassan%2C+H">Hasan Hassan</a>, <a href="/search/cs?searchtype=author&query=Pekhimenko%2C+G">Gennady Pekhimenko</a>, <a href="/search/cs?searchtype=author&query=Vijaykumar%2C+N">Nandita Vijaykumar</a>, <a href="/search/cs?searchtype=author&query=Seshadri%2C+V">Vivek Seshadri</a>, <a href="/search/cs?searchtype=author&query=Lee%2C+D">Donghyuk Lee</a>, <a href="/search/cs?searchtype=author&query=Ergin%2C+O">Oguz Ergin</a>, <a href="/search/cs?searchtype=author&query=Mutlu%2C+O">Onur Mutlu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1805.03969v1-abstract-short" style="display: inline;"> This paper summarizes the idea of ChargeCache, which was published in HPCA 2016 [51], and examines the work's significance and future potential. DRAM latency continues to be a critical bottleneck for system performance. In this work, we develop a low-cost mechanism, called ChargeCache, that enables faster access to recently-accessed rows in DRAM, with no modifications to DRAM chips. 
Our mechanism… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1805.03969v1-abstract-full').style.display = 'inline'; document.getElementById('1805.03969v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1805.03969v1-abstract-full" style="display: none;"> This paper summarizes the idea of ChargeCache, which was published in HPCA 2016 [51], and examines the work's significance and future potential. DRAM latency continues to be a critical bottleneck for system performance. In this work, we develop a low-cost mechanism, called ChargeCache, that enables faster access to recently-accessed rows in DRAM, with no modifications to DRAM chips. Our mechanism is based on the key observation that a recently-accessed row has more charge and thus the following access to the same row can be performed faster. To exploit this observation, we propose to track the addresses of recently-accessed rows in a table in the memory controller. If a later DRAM request hits in that table, the memory controller uses lower timing parameters, leading to reduced DRAM latency. Row addresses are removed from the table after a specified duration to ensure rows that have leaked too much charge are not accessed with lower latency. We evaluate ChargeCache on a wide variety of workloads and show that it provides significant performance and energy benefits for both single-core and multi-core systems. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1805.03969v1-abstract-full').style.display = 'none'; document.getElementById('1805.03969v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 May, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2018. 
</p> <p class="comments is-size-7"> <span class="has-text-black-bis has-text-weight-semibold">Comments:</span> <span class="has-text-grey-dark mathjax">arXiv admin note: substantial text overlap with arXiv:1609.07234</span> </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1805.03195">arXiv:1805.03195</a> <span> [<a href="https://arxiv.org/pdf/1805.03195">pdf</a>, <a href="https://arxiv.org/format/1805.03195">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Hardware Architecture">cs.AR</span> </div> </div> <p class="title is-5 mathjax"> SoftMC: Practical DRAM Characterization Using an FPGA-Based Infrastructure </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Hassan%2C+H">Hasan Hassan</a>, <a href="/search/cs?searchtype=author&query=Vijaykumar%2C+N">Nandita Vijaykumar</a>, <a href="/search/cs?searchtype=author&query=Khan%2C+S">Samira Khan</a>, <a href="/search/cs?searchtype=author&query=Ghose%2C+S">Saugata Ghose</a>, <a href="/search/cs?searchtype=author&query=Chang%2C+K">Kevin Chang</a>, <a href="/search/cs?searchtype=author&query=Pekhimenko%2C+G">Gennady Pekhimenko</a>, <a href="/search/cs?searchtype=author&query=Lee%2C+D">Donghyuk Lee</a>, <a href="/search/cs?searchtype=author&query=Ergin%2C+O">Oguz Ergin</a>, <a href="/search/cs?searchtype=author&query=Mutlu%2C+O">Onur Mutlu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1805.03195v1-abstract-short" style="display: inline;"> This paper summarizes the SoftMC DRAM characterization infrastructure, which was published in HPCA 2017, and examines the work's significance and future potential. 
SoftMC (Soft Memory Controller) is the first publicly-available DRAM testing infrastructure that can flexibly and efficiently test DRAM chips in a manner accessible to both software and hardware developers. SoftMC is an FPGA-based tes… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1805.03195v1-abstract-full').style.display = 'inline'; document.getElementById('1805.03195v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1805.03195v1-abstract-full" style="display: none;"> This paper summarizes the SoftMC DRAM characterization infrastructure, which was published in HPCA 2017, and examines the work's significance and future potential. SoftMC (Soft Memory Controller) is the first publicly-available DRAM testing infrastructure that can flexibly and efficiently test DRAM chips in a manner accessible to both software and hardware developers. SoftMC is an FPGA-based testing platform that can control and test memory modules designed for the commonly-used DDR (Double Data Rate) interface. SoftMC has two key properties: (i) it provides flexibility to thoroughly control memory behavior or to implement a wide range of mechanisms using DDR commands; and (ii) it is easy to use as it provides a simple and intuitive high-level programming interface for users, completely hiding the low-level details of the FPGA. We demonstrate the capability, flexibility, and programming ease of SoftMC with two example use cases. First, we implement a test that characterizes the retention time of DRAM cells. Second, we show that the expected latency reduction of two recently-proposed mechanisms, which rely on accessing recently-refreshed or recently-accessed DRAM cells faster than other DRAM cells, is not observable in existing DRAM chips. Various versions of the SoftMC platform have enabled many of our other DRAM characterization studies. 
We discuss several other use cases of SoftMC, including the ability to characterize emerging non-volatile memory modules that obey the DDR standard. We hope that our open-source release of SoftMC fills a gap in the space of publicly-available experimental memory testing infrastructures and inspires new studies, ideas, and methodologies in memory system design. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1805.03195v1-abstract-full').style.display = 'none'; document.getElementById('1805.03195v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 May, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2018. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1805.03175">arXiv:1805.03175</a> <span> [<a href="https://arxiv.org/pdf/1805.03175">pdf</a>, <a href="https://arxiv.org/format/1805.03175">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Hardware Architecture">cs.AR</span> </div> </div> <p class="title is-5 mathjax"> Voltron: Understanding and Exploiting the Voltage-Latency-Reliability Trade-Offs in Modern DRAM Chips to Improve Energy Efficiency </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Chang%2C+K+K">Kevin K. 
Chang</a>, <a href="/search/cs?searchtype=author&query=Yagl%C4%B1k%C3%A7%C4%B1%2C+A+G">Abdullah Giray Yaglıkçı</a>, <a href="/search/cs?searchtype=author&query=Ghose%2C+S">Saugata Ghose</a>, <a href="/search/cs?searchtype=author&query=Agrawal%2C+A">Aditya Agrawal</a>, <a href="/search/cs?searchtype=author&query=Chatterjee%2C+N">Niladrish Chatterjee</a>, <a href="/search/cs?searchtype=author&query=Kashyap%2C+A">Abhijith Kashyap</a>, <a href="/search/cs?searchtype=author&query=Lee%2C+D">Donghyuk Lee</a>, <a href="/search/cs?searchtype=author&query=O%27Connor%2C+M">Mike O'Connor</a>, <a href="/search/cs?searchtype=author&query=Hassan%2C+H">Hasan Hassan</a>, <a href="/search/cs?searchtype=author&query=Mutlu%2C+O">Onur Mutlu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1805.03175v1-abstract-short" style="display: inline;"> This paper summarizes our work on experimental characterization and analysis of reduced-voltage operation in modern DRAM chips, which was published in SIGMETRICS 2017, and examines the work's significance and future potential. We take a comprehensive approach to understanding and exploiting the latency and reliability characteristics of modern DRAM when the DRAM supply voltage is lowered below t… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1805.03175v1-abstract-full').style.display = 'inline'; document.getElementById('1805.03175v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1805.03175v1-abstract-full" style="display: none;"> This paper summarizes our work on experimental characterization and analysis of reduced-voltage operation in modern DRAM chips, which was published in SIGMETRICS 2017, and examines the work's significance and future potential. 
We take a comprehensive approach to understanding and exploiting the latency and reliability characteristics of modern DRAM when the DRAM supply voltage is lowered below the nominal voltage level specified by DRAM standards. We perform an experimental study of 124 real DDR3L (low-voltage) DRAM chips manufactured recently by three major DRAM vendors. We find that reducing the supply voltage below a certain point introduces bit errors in the data, and we comprehensively characterize the behavior of these errors. We discover that these errors can be avoided by increasing the latency of three major DRAM operations (activation, restoration, and precharge). We perform detailed DRAM circuit simulations to validate and explain our experimental findings. We also characterize the various relationships between reduced supply voltage and error locations, stored data patterns, DRAM temperature, and data retention. Based on our observations, we propose a new DRAM energy reduction mechanism, called Voltron. The key idea of Voltron is to use a performance model to determine by how much we can reduce the supply voltage without introducing errors and without exceeding a user-specified threshold for performance loss. Our evaluations show that Voltron reduces the average DRAM and system energy consumption by 10.5% and 7.3%, respectively, while limiting the average system performance loss to only 1.8%, for a variety of memory-intensive quad-core workloads. We also show that Voltron significantly outperforms prior dynamic voltage and frequency scaling mechanisms for DRAM. 
<a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1805.03175v1-abstract-full').style.display = 'none'; document.getElementById('1805.03175v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 May, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2018. </p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1805.03154">arXiv:1805.03154</a> <span> [<a href="https://arxiv.org/pdf/1805.03154">pdf</a>, <a href="https://arxiv.org/format/1805.03154">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Hardware Architecture">cs.AR</span> </div> </div> <p class="title is-5 mathjax"> Flexible-Latency DRAM: Understanding and Exploiting Latency Variation in Modern DRAM Chips </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Chang%2C+K+K">Kevin K. 
Chang</a>, <a href="/search/cs?searchtype=author&query=Kashyap%2C+A">Abhijith Kashyap</a>, <a href="/search/cs?searchtype=author&query=Hassan%2C+H">Hasan Hassan</a>, <a href="/search/cs?searchtype=author&query=Ghose%2C+S">Saugata Ghose</a>, <a href="/search/cs?searchtype=author&query=Hsieh%2C+K">Kevin Hsieh</a>, <a href="/search/cs?searchtype=author&query=Lee%2C+D">Donghyuk Lee</a>, <a href="/search/cs?searchtype=author&query=Li%2C+T">Tianshi Li</a>, <a href="/search/cs?searchtype=author&query=Pekhimenko%2C+G">Gennady Pekhimenko</a>, <a href="/search/cs?searchtype=author&query=Khan%2C+S">Samira Khan</a>, <a href="/search/cs?searchtype=author&query=Mutlu%2C+O">Onur Mutlu</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1805.03154v1-abstract-short" style="display: inline;"> This article summarizes key results of our work on experimental characterization and analysis of latency variation and latency-reliability trade-offs in modern DRAM chips, which was published in SIGMETRICS 2016, and examines the work's significance and future potential. The goal of this work is to (i) experimentally characterize and understand the latency variation across cells within a DRAM chi… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1805.03154v1-abstract-full').style.display = 'inline'; document.getElementById('1805.03154v1-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1805.03154v1-abstract-full" style="display: none;"> This article summarizes key results of our work on experimental characterization and analysis of latency variation and latency-reliability trade-offs in modern DRAM chips, which was published in SIGMETRICS 2016, and examines the work's significance and future potential. 
The goal of this work is to (i) experimentally characterize and understand the latency variation across cells within a DRAM chip for these three fundamental DRAM operations, and (ii) develop new mechanisms that exploit our understanding of the latency variation to reliably improve performance. To this end, we comprehensively characterize 240 DRAM chips from three major vendors, and make six major new observations about latency variation within DRAM. Notably, we find that (i) there is large latency variation across the cells for each of the three operations; (ii) variation characteristics exhibit significant spatial locality: slower cells are clustered in certain regions of a DRAM chip; and (iii) the three fundamental operations exhibit different reliability characteristics when the latency of each operation is reduced. Based on our observations, we propose Flexible-LatencY DRAM (FLY-DRAM), a mechanism that exploits latency variation across DRAM cells within a DRAM chip to improve system performance. The key idea of FLY-DRAM is to exploit the spatial locality of slower cells within DRAM, and access the faster DRAM regions with reduced latencies for the fundamental operations. Our evaluations show that FLY-DRAM improves the performance of a wide range of applications by 13.3%, 17.6%, and 19.5%, on average, for each of the three different vendors' real DRAM chips, in a simulated 8-core system. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1805.03154v1-abstract-full').style.display = 'none'; document.getElementById('1805.03154v1-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 8 May, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> May 2018. 
</p> </li> <li class="arxiv-result"> <div class="is-marginless"> <p class="list-title is-inline-block"><a href="https://arxiv.org/abs/1803.05567">arXiv:1803.05567</a> <span> [<a href="https://arxiv.org/pdf/1803.05567">pdf</a>, <a href="https://arxiv.org/format/1803.05567">other</a>] </span> </p> <div class="tags is-inline-block"> <span class="tag is-small is-link tooltip is-tooltip-top" data-tooltip="Computation and Language">cs.CL</span> </div> </div> <p class="title is-5 mathjax"> Achieving Human Parity on Automatic Chinese to English News Translation </p> <p class="authors"> <span class="search-hit">Authors:</span> <a href="/search/cs?searchtype=author&query=Hassan%2C+H">Hany Hassan</a>, <a href="/search/cs?searchtype=author&query=Aue%2C+A">Anthony Aue</a>, <a href="/search/cs?searchtype=author&query=Chen%2C+C">Chang Chen</a>, <a href="/search/cs?searchtype=author&query=Chowdhary%2C+V">Vishal Chowdhary</a>, <a href="/search/cs?searchtype=author&query=Clark%2C+J">Jonathan Clark</a>, <a href="/search/cs?searchtype=author&query=Federmann%2C+C">Christian Federmann</a>, <a href="/search/cs?searchtype=author&query=Huang%2C+X">Xuedong Huang</a>, <a href="/search/cs?searchtype=author&query=Junczys-Dowmunt%2C+M">Marcin Junczys-Dowmunt</a>, <a href="/search/cs?searchtype=author&query=Lewis%2C+W">William Lewis</a>, <a href="/search/cs?searchtype=author&query=Li%2C+M">Mu Li</a>, <a href="/search/cs?searchtype=author&query=Liu%2C+S">Shujie Liu</a>, <a href="/search/cs?searchtype=author&query=Liu%2C+T">Tie-Yan Liu</a>, <a href="/search/cs?searchtype=author&query=Luo%2C+R">Renqian Luo</a>, <a href="/search/cs?searchtype=author&query=Menezes%2C+A">Arul Menezes</a>, <a href="/search/cs?searchtype=author&query=Qin%2C+T">Tao Qin</a>, <a href="/search/cs?searchtype=author&query=Seide%2C+F">Frank Seide</a>, <a href="/search/cs?searchtype=author&query=Tan%2C+X">Xu Tan</a>, <a href="/search/cs?searchtype=author&query=Tian%2C+F">Fei Tian</a>, <a 
href="/search/cs?searchtype=author&query=Wu%2C+L">Lijun Wu</a>, <a href="/search/cs?searchtype=author&query=Wu%2C+S">Shuangzhi Wu</a>, <a href="/search/cs?searchtype=author&query=Xia%2C+Y">Yingce Xia</a>, <a href="/search/cs?searchtype=author&query=Zhang%2C+D">Dongdong Zhang</a>, <a href="/search/cs?searchtype=author&query=Zhang%2C+Z">Zhirui Zhang</a>, <a href="/search/cs?searchtype=author&query=Zhou%2C+M">Ming Zhou</a> </p> <p class="abstract mathjax"> <span class="has-text-black-bis has-text-weight-semibold">Abstract</span>: <span class="abstract-short has-text-grey-dark mathjax" id="1803.05567v2-abstract-short" style="display: inline;"> Machine translation has made rapid advances in recent years. Millions of people are using it today in online translation systems and mobile applications in order to communicate across language barriers. The question naturally arises whether such systems can approach or achieve parity with human translations. In this paper, we first address the problem of how to define and accurately measure human… <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1803.05567v2-abstract-full').style.display = 'inline'; document.getElementById('1803.05567v2-abstract-short').style.display = 'none';">▽ More</a> </span> <span class="abstract-full has-text-grey-dark mathjax" id="1803.05567v2-abstract-full" style="display: none;"> Machine translation has made rapid advances in recent years. Millions of people are using it today in online translation systems and mobile applications in order to communicate across language barriers. The question naturally arises whether such systems can approach or achieve parity with human translations. In this paper, we first address the problem of how to define and accurately measure human parity in translation. We then describe Microsoft's machine translation system and measure the quality of its translations on the widely used WMT 2017 news translation task from Chinese to English. 
We find that our latest neural machine translation system has reached a new state-of-the-art, and that the translation quality is at human parity when compared to professional human translations. We also find that it significantly exceeds the quality of crowd-sourced non-professional translations. <a class="is-size-7" style="white-space: nowrap;" onclick="document.getElementById('1803.05567v2-abstract-full').style.display = 'none'; document.getElementById('1803.05567v2-abstract-short').style.display = 'inline';">△ Less</a> </span> </p> <p class="is-size-7"><span class="has-text-black-bis has-text-weight-semibold">Submitted</span> 29 June, 2018; <span class="has-text-black-bis has-text-weight-semibold">v1</span> submitted 14 March, 2018; <span class="has-text-black-bis has-text-weight-semibold">originally announced</span> March 2018. </p> </li> </ol> <nav class="pagination is-small is-centered breathe-horizontal" role="navigation" aria-label="pagination"> <a href="" class="pagination-previous is-invisible">Previous </a> <a href="/search/?searchtype=author&query=Hassan%2C+H&start=50" class="pagination-next" >Next </a> <ul class="pagination-list"> <li> <a href="/search/?searchtype=author&query=Hassan%2C+H&start=0" class="pagination-link is-current" aria-label="Goto page 1" aria-current="page">1 </a> </li> <li> <a href="/search/?searchtype=author&query=Hassan%2C+H&start=50" class="pagination-link " aria-label="Page 2">2 </a> </li> </ul> </nav> <div class="is-hidden-tablet"> <!-- feedback for mobile only --> <span class="help" style="display: inline-block;"><a href="https://github.com/arXiv/arxiv-search/releases">Search v0.5.6 released 2020-02-24</a> </span> </div> </div> </main> <footer> <div class="columns is-desktop" role="navigation" aria-label="Secondary"> <!-- MetaColumn 1 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/about">About</a></li> <li><a 
href="https://info.arxiv.org/help">Help</a></li> </ul> </div> <div class="column"> <ul class="nav-spaced"> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>contact arXiv</title><desc>Click here to contact arXiv</desc><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg> <a href="https://info.arxiv.org/help/contact.html"> Contact</a> </li> <li> <svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><title>subscribe to arXiv mailings</title><desc>Click here to subscribe</desc><path d="M476 3.2L12.5 270.6c-18.1 10.4-15.8 35.6 2.2 43.2L121 358.4l287.3-253.2c5.5-4.9 13.3 2.6 8.6 8.3L176 407v80.5c0 23.6 28.5 32.9 42.5 15.8L282 426l124.6 52.2c14.2 6 30.4-2.9 33-18.2l72-432C515 7.8 493.3-6.8 476 3.2z"/></svg> <a href="https://info.arxiv.org/help/subscribe"> Subscribe</a> </li> </ul> </div> </div> </div> <!-- end MetaColumn 1 --> <!-- MetaColumn 2 --> <div class="column"> <div class="columns"> <div class="column"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/license/index.html">Copyright</a></li> <li><a href="https://info.arxiv.org/help/policies/privacy_policy.html">Privacy Policy</a></li> </ul> </div> <div class="column sorry-app-links"> <ul class="nav-spaced"> <li><a href="https://info.arxiv.org/help/web_accessibility.html">Web Accessibility Assistance</a></li> <li> <p class="help"> <a class="a11y-main-link" href="https://status.arxiv.org" target="_blank">arXiv Operational Status <svg 
xmlns="http://www.w3.org/2000/svg" viewBox="0 0 256 512" class="icon filter-dark_grey" role="presentation"><path d="M224.3 273l-136 136c-9.4 9.4-24.6 9.4-33.9 0l-22.6-22.6c-9.4-9.4-9.4-24.6 0-33.9l96.4-96.4-96.4-96.4c-9.4-9.4-9.4-24.6 0-33.9L54.3 103c9.4-9.4 24.6-9.4 33.9 0l136 136c9.5 9.4 9.5 24.6.1 34z"/></svg></a><br> Get status notifications via <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/email/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 512 512" class="icon filter-black" role="presentation"><path d="M502.3 190.8c3.9-3.1 9.7-.2 9.7 4.7V400c0 26.5-21.5 48-48 48H48c-26.5 0-48-21.5-48-48V195.6c0-5 5.7-7.8 9.7-4.7 22.4 17.4 52.1 39.5 154.1 113.6 21.1 15.4 56.7 47.8 92.2 47.6 35.7.3 72-32.8 92.3-47.6 102-74.1 131.6-96.3 154-113.7zM256 320c23.2.4 56.6-29.2 73.4-41.4 132.7-96.3 142.8-104.7 173.4-128.7 5.8-4.5 9.2-11.5 9.2-18.9v-19c0-26.5-21.5-48-48-48H48C21.5 64 0 85.5 0 112v19c0 7.4 3.4 14.3 9.2 18.9 30.6 23.9 40.7 32.4 173.4 128.7 16.8 12.2 50.2 41.8 73.4 41.4z"/></svg>email</a> or <a class="is-link" href="https://subscribe.sorryapp.com/24846f03/slack/new" target="_blank"><svg xmlns="http://www.w3.org/2000/svg" viewBox="0 0 448 512" class="icon filter-black" role="presentation"><path d="M94.12 315.1c0 25.9-21.16 47.06-47.06 47.06S0 341 0 315.1c0-25.9 21.16-47.06 47.06-47.06h47.06v47.06zm23.72 0c0-25.9 21.16-47.06 47.06-47.06s47.06 21.16 47.06 47.06v117.84c0 25.9-21.16 47.06-47.06 47.06s-47.06-21.16-47.06-47.06V315.1zm47.06-188.98c-25.9 0-47.06-21.16-47.06-47.06S139 32 164.9 32s47.06 21.16 47.06 47.06v47.06H164.9zm0 23.72c25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06H47.06C21.16 243.96 0 222.8 0 196.9s21.16-47.06 47.06-47.06H164.9zm188.98 47.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06s-21.16 47.06-47.06 47.06h-47.06V196.9zm-23.72 0c0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06V79.06c0-25.9 21.16-47.06 47.06-47.06 25.9 0 47.06 21.16 47.06 47.06V196.9zM283.1 385.88c25.9 0 
47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06-25.9 0-47.06-21.16-47.06-47.06v-47.06h47.06zm0-23.72c-25.9 0-47.06-21.16-47.06-47.06 0-25.9 21.16-47.06 47.06-47.06h117.84c25.9 0 47.06 21.16 47.06 47.06 0 25.9-21.16 47.06-47.06 47.06H283.1z"/></svg>slack</a> </p> </li> </ul> </div> </div> </div> <!-- end MetaColumn 2 --> </div> </footer> <script src="https://static.arxiv.org/static/base/1.0.0a5/js/member_acknowledgement.js"></script> </body> </html>