<?xml version="1.0" encoding="iso-8859-1" standalone="no"?>
<!DOCTYPE GmsArticle SYSTEM "http://www.egms.de/dtd/2.0.34/GmsArticle.dtd">
<GmsArticle xmlns:xlink="http://www.w3.org/1999/xlink">
  <MetaData>
    <Identifier>25dga003</Identifier>
    <IdentifierDoi>10.3205/25dga003</IdentifierDoi>
    <IdentifierUrn>urn:nbn:de:0183-25dga0030</IdentifierUrn>
    <ArticleType>Meeting Abstract</ArticleType>
    <TitleGroup>
      <Title language="en">Hearing-in-noise deficits &#8211; clinical measures and prescription of advanced hearing-aid features</Title>
    </TitleGroup>
    <CreatorList>
      <Creator>
        <PersonNames>
          <Lastname>Zaar</Lastname>
          <LastnameHeading>Zaar</LastnameHeading>
          <Firstname>Johannes</Firstname>
          <Initials>J</Initials>
        </PersonNames>
        <Address>
          <Affiliation>Eriksholm Research Centre, Snekkersten, D&#228;nemark</Affiliation>
          <Affiliation>Hearing Systems Section, Department of Health Technology, Technical University of Denmark, Kongens Lyngby, D&#228;nemark</Affiliation>
        </Address>
        <Creatorrole corresponding="no" presenting="yes">author</Creatorrole>
      </Creator>
      <Creator>
        <PersonNames>
          <Lastname>Laugesen</Lastname>
          <LastnameHeading>Laugesen</LastnameHeading>
          <Firstname>S&#248;ren</Firstname>
          <Initials>S</Initials>
        </PersonNames>
        <Address>
          <Affiliation>Interacoustics Research Unit, Kongens Lyngby, D&#228;nemark</Affiliation>
        </Address>
        <Creatorrole corresponding="no" presenting="no">author</Creatorrole>
      </Creator>
      <Creator>
        <PersonNames>
          <Lastname>Santurette</Lastname>
          <LastnameHeading>Santurette</LastnameHeading>
          <Firstname>S&#233;bastien</Firstname>
          <Initials>S</Initials>
        </PersonNames>
        <Address>
          <Affiliation>Centre for Applied Audiology Research, Oticon A&#47;S, Sm&#248;rum, D&#228;nemark</Affiliation>
        </Address>
        <Creatorrole corresponding="no" presenting="no">author</Creatorrole>
      </Creator>
      <Creator>
        <PersonNames>
          <Lastname>Jones</Lastname>
          <LastnameHeading>Jones</LastnameHeading>
          <Firstname>Gary</Firstname>
          <Initials>G</Initials>
        </PersonNames>
        <Address>
          <Affiliation>Demant A&#47;S, Sm&#248;rum, D&#228;nemark</Affiliation>
        </Address>
        <Creatorrole corresponding="no" presenting="no">author</Creatorrole>
      </Creator>
      <Creator>
        <PersonNames>
          <Lastname>Vatti</Lastname>
          <LastnameHeading>Vatti</LastnameHeading>
          <Firstname>Marianna</Firstname>
          <Initials>M</Initials>
        </PersonNames>
        <Address>
          <Affiliation>Centre for Applied Audiology Research, Oticon A&#47;S, Sm&#248;rum, D&#228;nemark</Affiliation>
        </Address>
        <Creatorrole corresponding="no" presenting="no">author</Creatorrole>
      </Creator>
      <Creator>
        <PersonNames>
          <Lastname>Tanaka</Lastname>
          <LastnameHeading>Tanaka</LastnameHeading>
          <Firstname>Chiemi</Firstname>
          <Initials>C</Initials>
        </PersonNames>
        <Address>
          <Affiliation>Oticon Japan, Kawasaki, Japan</Affiliation>
          <Affiliation>Diatec Japan, Kawasaki, Japan</Affiliation>
        </Address>
        <Creatorrole corresponding="no" presenting="no">author</Creatorrole>
      </Creator>
      <Creator>
        <PersonNames>
          <Lastname>Ihly</Lastname>
          <LastnameHeading>Ihly</LastnameHeading>
          <Firstname>Peter</Firstname>
          <Initials>P</Initials>
        </PersonNames>
        <Address>
          <Affiliation>Institute of Acoustics, University of Applied Sciences L&#252;beck, L&#252;beck, Deutschland</Affiliation>
        </Address>
        <Creatorrole corresponding="no" presenting="no">author</Creatorrole>
      </Creator>
      <Creator>
        <PersonNames>
          <Lastname>J&#252;rgens</Lastname>
          <LastnameHeading>J&#252;rgens</LastnameHeading>
          <Firstname>Tim</Firstname>
          <Initials>T</Initials>
        </PersonNames>
        <Address>
          <Affiliation>Institute of Acoustics, University of Applied Sciences L&#252;beck, L&#252;beck, Deutschland</Affiliation>
        </Address>
        <Creatorrole corresponding="no" presenting="no">author</Creatorrole>
      </Creator>
    </CreatorList>
    <PublisherList>
      <Publisher>
        <Corporation>
          <Corporatename>German Medical Science GMS Publishing House</Corporatename>
        </Corporation>
        <Address>D&#252;sseldorf</Address>
      </Publisher>
    </PublisherList>
    <SubjectGroup>
      <SubjectheadingDDB>610</SubjectheadingDDB>
    </SubjectGroup>
    <DatePublishedList>
      <DatePublished>20250318</DatePublished>
    </DatePublishedList>
    <Language>engl</Language>
    <License license-type="open-access" xlink:href="http://creativecommons.org/licenses/by/4.0/">
      <AltText language="en">This is an Open Access article distributed under the terms of the Creative Commons Attribution 4.0 License.</AltText>
      <AltText language="de">Dieser Artikel ist ein Open-Access-Artikel und steht unter den Lizenzbedingungen der Creative Commons Attribution 4.0 License (Namensnennung).</AltText>
    </License>
    <SourceGroup>
      <Meeting>
        <MeetingId>M0607</MeetingId>
        <MeetingSequence>003</MeetingSequence>
        <MeetingCorporation>Deutsche Gesellschaft f&#252;r Audiologie e. V. und ADANO</MeetingCorporation>
        <MeetingName>27. Jahrestagung der Deutschen Gesellschaft f&#252;r Audiologie und Arbeitstagung der Arbeitsgemeinschaft Deutschsprachiger Audiologen, Neurootologen und Otologen</MeetingName>
        <MeetingTitle></MeetingTitle>
        <MeetingSession>Strukturierte Sitzung 2: Audiologische H&#246;rger&#228;temerkmale &#8211; Evaluierung und neue Entwicklungen</MeetingSession>
        <MeetingCity>G&#246;ttingen</MeetingCity>
        <MeetingDate>
          <DateFrom>20250319</DateFrom>
          <DateTo>20250321</DateTo>
        </MeetingDate>
      </Meeting>
    </SourceGroup>
    <ArticleNo>003</ArticleNo>
  </MetaData>
  <OrigData>
    <TextBlock name="Text" linked="yes">
      <MainHeadline>Text</MainHeadline><Pgraph>The pure-tone audiogram is the main clinical diagnostic used for assessing hearing loss and provides the basis for prescribing hearing-aid amplification to restore audibility. However, the audiogram does not necessarily reflect the hearing-in-noise deficits that can remain when audibility has been restored, which can manifest themselves, e.g., in poor speech understanding in adverse conditions. A clinical measure of such deficits would thus be highly useful for prescribing advanced hearing-aid features designed to counteract hearing-in-noise deficits, such as powerful deep neural network (DNN) based noise reduction algorithms.</Pgraph><Pgraph>To enable assessment of hearing-in-noise deficits in the clinic, we have conducted multiple research studies optimizing spectro-temporal modulation (STM) detection tests for hearing-impaired listeners such that the measured STM performance predicts the listeners&#8217; speech-in-noise performance <TextLink reference="1"></TextLink>, <TextLink reference="2"></TextLink>. These studies resulted in the development of a clinical test paradigm called the Audible Contrast Threshold (ACT&#8482;) test, a novel quick-and-simple clinical STM detection test with built-in audibility compensation <TextLink reference="3"></TextLink>.</Pgraph><Pgraph>We evaluated ACT in a large-scale clinical study with 100&#43; participants in terms of test-retest reliability, testing time, and predictive power with regard to aided speech-in-noise performance. Speech reception thresholds (SRTs) were measured with participants using Oticon More 1 hearing aids in a challenging setting with spatially distributed speech interferers. 
Four different hearing-aid settings were tested: amplification only, mild directionality and noise reduction (DIR&#43;NR), medium DIR&#43;NR, and strong DIR&#43;NR, using Oticon&#8217;s DNN-based help-in-noise feature.</Pgraph><Pgraph>The ACT test was found to be quick (&#60;2 minutes) and reliable. On the group level, SRTs were highest for the amplification-only setting and decreased with increasing levels of DIR&#43;NR processing. The individual SRTs collected with amplification only were strongly correlated with ACT and &#8211; to a lesser extent &#8211; with the 4-frequency pure-tone average (PTA4). The predictive power of ACT and PTA4 was found to be complementary, as they both contributed significantly to predicting the amplification-only SRTs in a two-predictor linear regression model. The individual SRT benefit induced by different levels of DIR&#43;NR varied substantially across participants, with poor performers benefiting most from strong DIR&#43;NR. A dedicated analysis showed that the variability in SRT benefit was correlated with the closedness of acoustic coupling and with ACT.</Pgraph><Pgraph>Overall, the results suggest that ACT is a clinically viable hearing-in-noise test that can be used to prescribe advanced hearing-aid features, especially DIR&#43;NR features that improve the speech-to-noise ratio. Special attention should be paid to the effects of acoustic coupling on the real-life efficacy of DIR&#43;NR processing.</Pgraph></TextBlock>
    <References linked="yes">
      <Reference refNo="1">
        <RefAuthor>Zaar J</RefAuthor>
        <RefAuthor>Simonsen LB</RefAuthor>
        <RefAuthor>Dau T</RefAuthor>
        <RefAuthor>Laugesen S</RefAuthor>
        <RefTitle>Toward a clinically viable spectro-temporal modulation test for predicting supra-threshold speech reception in hearing-impaired listeners</RefTitle>
        <RefYear>2023</RefYear>
        <RefJournal>Hear Res</RefJournal>
        <RefPage>108650</RefPage>
        <RefTotal>Zaar J, Simonsen LB, Dau T, Laugesen S. Toward a clinically viable spectro-temporal modulation test for predicting supra-threshold speech reception in hearing-impaired listeners. Hear Res. 2023 Jan;427:108650. DOI: 10.1016&#47;j.heares.2022.108650</RefTotal>
        <RefLink>http:&#47;&#47;dx.doi.org&#47;10.1016&#47;j.heares.2022.108650</RefLink>
      </Reference>
      <Reference refNo="2">
        <RefAuthor>Zaar J</RefAuthor>
        <RefAuthor>Simonsen LB</RefAuthor>
        <RefAuthor>Laugesen S</RefAuthor>
        <RefTitle>A spectro-temporal modulation test for predicting speech reception in hearing-impaired listeners with hearing aids</RefTitle>
        <RefYear>2024</RefYear>
        <RefJournal>Hear Res</RefJournal>
        <RefPage>108949</RefPage>
        <RefTotal>Zaar J, Simonsen LB, Laugesen S. A spectro-temporal modulation test for predicting speech reception in hearing-impaired listeners with hearing aids. Hear Res. 2024 Mar 1;443:108949. DOI: 10.1016&#47;j.heares.2024.108949</RefTotal>
        <RefLink>http:&#47;&#47;dx.doi.org&#47;10.1016&#47;j.heares.2024.108949</RefLink>
      </Reference>
      <Reference refNo="3">
        <RefAuthor>Zaar J</RefAuthor>
        <RefAuthor>Simonsen LB</RefAuthor>
        <RefAuthor>Sanchez-Lopez R</RefAuthor>
        <RefAuthor>Laugesen S</RefAuthor>
        <RefTitle>The Audible Contrast Threshold (ACT) test: A clinical spectro-temporal modulation detection test</RefTitle>
        <RefYear>2024</RefYear>
        <RefJournal>Hear Res</RefJournal>
        <RefPage>109103</RefPage>
        <RefTotal>Zaar J, Simonsen LB, Sanchez-Lopez R, Laugesen S. The Audible Contrast Threshold (ACT) test: A clinical spectro-temporal modulation detection test. Hear Res. 2024 Nov;453:109103. DOI: 10.1016&#47;j.heares.2024.109103</RefTotal>
        <RefLink>http:&#47;&#47;dx.doi.org&#47;10.1016&#47;j.heares.2024.109103</RefLink>
      </Reference>
    </References>
    <Media>
      <Tables>
        <NoOfTables>0</NoOfTables>
      </Tables>
      <Figures>
        <NoOfPictures>0</NoOfPictures>
      </Figures>
      <InlineFigures>
        <NoOfPictures>0</NoOfPictures>
      </InlineFigures>
      <Attachments>
        <NoOfAttachments>0</NoOfAttachments>
      </Attachments>
    </Media>
  </OrigData>
</GmsArticle>