<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="research-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIR Aging</journal-id><journal-id journal-id-type="publisher-id">aging</journal-id><journal-id journal-id-type="index">31</journal-id><journal-title>JMIR Aging</journal-title><abbrev-journal-title>JMIR Aging</abbrev-journal-title><issn pub-type="epub">2561-7605</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v8i1e63686</article-id><article-id pub-id-type="doi">10.2196/63686</article-id><article-categories><subj-group subj-group-type="heading"><subject>Original Paper</subject></subj-group></article-categories><title-group><article-title>Using Deep Learning to Perform Automatic Quantitative Measurement of Masseter and Tongue Muscles in Persons With Dementia: Cross-Sectional Study</article-title></title-group><contrib-group><contrib contrib-type="author"><name name-style="western"><surname>Imani</surname><given-names>Mahdi</given-names></name><degrees>PhD, Biomed Eng</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Borda</surname><given-names>Miguel G</given-names></name><degrees>MD, PhD</degrees><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="aff" rid="aff3">3</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Vogrin</surname><given-names>Sara</given-names></name><degrees>MD, MSc</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name 
name-style="western"><surname>Meijering</surname><given-names>Erik</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff4">4</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Aarsland</surname><given-names>Dag</given-names></name><degrees>MD, PhD</degrees><xref ref-type="aff" rid="aff2">2</xref><xref ref-type="aff" rid="aff5">5</xref></contrib><contrib contrib-type="author" corresp="yes"><name name-style="western"><surname>Duque</surname><given-names>Gustavo</given-names></name><degrees>MD, PhD</degrees><xref ref-type="aff" rid="aff6">6</xref></contrib></contrib-group><aff id="aff1"><institution>Department of Medicine, Melbourne Medical School, University of Melbourne</institution><addr-line>St. Albans</addr-line><country>Australia</country></aff><aff id="aff2"><institution>Centre for Age-Related Medicine (SESAM), Stavanger University Hospital</institution><addr-line>Stavanger</addr-line><country>Norway</country></aff><aff id="aff3"><institution>Department of Neurology, Cl&#x00ED;nica Universidad de Navarra</institution><addr-line>Pamplona</addr-line><country>Spain</country></aff><aff id="aff4"><institution>School of Computer Science and Engineering, UNSW Sydney</institution><addr-line>Sydney</addr-line><country>Australia</country></aff><aff id="aff5"><institution>Department of Old Age Psychiatry, Institute of Psychiatry, Psychology, and Neuroscience, King's College London</institution><addr-line>London</addr-line><country>United Kingdom</country></aff><aff id="aff6"><institution>Bone, Muscle &#x0026; Geroscience Group, Research Institute of the McGill University Health Centre, McGill University</institution><addr-line>1001 Decarie Blvd, Room EM1.3226</addr-line><addr-line>Montreal</addr-line><addr-line>QC</addr-line><country>Canada</country></aff><contrib-group><contrib contrib-type="editor"><name 
name-style="western"><surname>Gray</surname><given-names>Michelle</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Gang</surname><given-names>Qiangqiang</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Zhao</surname><given-names>Zihao</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Gustavo Duque, MD, PhD, Bone, Muscle &#x0026; Geroscience Group, Research Institute of the McGill University Health Centre, McGill University, 1001 Decarie Blvd, Room EM1.3226, Montreal, QC, H4A 3J1, Canada, 1 514 934 1934; <email>gustavo.duque@mcgill.ca</email></corresp></author-notes><pub-date pub-type="collection"><year>2025</year></pub-date><pub-date pub-type="epub"><day>19</day><month>3</month><year>2025</year></pub-date><volume>8</volume><elocation-id>e63686</elocation-id><history><date date-type="received"><day>26</day><month>06</month><year>2024</year></date><date date-type="rev-recd"><day>17</day><month>12</month><year>2024</year></date><date date-type="accepted"><day>02</day><month>01</month><year>2025</year></date></history><copyright-statement>&#x00A9; Mahdi Imani, Miguel G Borda, Sara Vogrin, Erik Meijering, Dag Aarsland, Gustavo Duque. Originally published in JMIR Aging (<ext-link ext-link-type="uri" xlink:href="https://aging.jmir.org">https://aging.jmir.org</ext-link>), 19.3.2025. 
</copyright-statement><copyright-year>2025</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Aging, is properly cited. The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://aging.jmir.org">https://aging.jmir.org</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://aging.jmir.org/2025/1/e63686"/><abstract><sec><title>Background</title><p>Sarcopenia (loss of muscle mass and strength) increases adverse outcomes risk and contributes to cognitive decline in older adults. Accurate methods to quantify muscle mass and predict adverse outcomes, particularly in older persons with dementia, are still lacking.</p></sec><sec><title>Objective</title><p>This study&#x2019;s main objective was to assess the feasibility of using deep learning techniques for segmentation and quantification of musculoskeletal tissues in magnetic resonance imaging (MRI) scans of the head in patients with neurocognitive disorders. This study aimed to pave the way for using automated techniques for opportunistic detection of sarcopenia in patients with neurocognitive disorder.</p></sec><sec sec-type="methods"><title>Methods</title><p>In a cross-sectional analysis of 53 participants, we used 7 U-Net-like deep learning models to segment 5 different tissues in head MRI images and used the Dice similarity coefficient and average symmetric surface distance as main assessment techniques to compare results. 
We also analyzed the relationship between BMI and muscle and fat volumes.</p></sec><sec sec-type="results"><title>Results</title><p>Our framework accurately quantified masseter and subcutaneous fat on the left and right sides of the head and tongue muscle (mean Dice similarity coefficient 92.4%). A significant correlation exists between the area and volume of tongue muscle, left masseter muscle, and BMI.</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>Our study demonstrates the successful application of a deep learning model to quantify muscle volumes in head MRI in patients with neurocognitive disorders. This is a promising first step toward clinically applicable artificial intelligence and deep learning methods for estimating masseter and tongue muscle and predicting adverse outcomes in this population.</p></sec></abstract><kwd-group><kwd>artificial intelligence</kwd><kwd>machine learning</kwd><kwd>sarcopenia</kwd><kwd>dementia</kwd><kwd>masseter muscle</kwd><kwd>tongue muscle</kwd><kwd>deep learning</kwd><kwd>head</kwd><kwd>tongue</kwd><kwd>face</kwd><kwd>magnetic resonance imaging</kwd><kwd>MRI</kwd><kwd>image</kwd><kwd>imaging</kwd><kwd>muscle</kwd><kwd>muscles</kwd><kwd>neural network</kwd><kwd>aging</kwd><kwd>gerontology</kwd><kwd>older adults</kwd><kwd>geriatrics</kwd><kwd>older adult health</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>Age-related muscle wasting and neurodegeneration, clinically presented as sarcopenia and dementia, respectively, are the major drivers of frailty, falls, and disability in older adults worldwide [<xref ref-type="bibr" rid="ref1">1</xref>]. Sarcopenia is characterized by loss of muscle mass, strength, and function in older adults. Aging is the leading risk factor, but conditions such as chronic diseases, inflammation, sedentarism, and malnutrition promote sarcopenia onset and progression [<xref ref-type="bibr" rid="ref2">2</xref>]. 
Sarcopenia has a 10% overall prevalence globally in older persons, 29% in the community, 14%-33% in long-term care settings, and up to 50% in the very old (&#x003E;80) [<xref ref-type="bibr" rid="ref3">3</xref>,<xref ref-type="bibr" rid="ref4">4</xref>]. Despite being a common and relevant health-related condition, it is unseen and underdiagnosed, particularly in older persons with cognitive disorders. To diagnose sarcopenia, measurement of muscle mass, muscle performance, and strength is necessary [<xref ref-type="bibr" rid="ref2">2</xref>]. Estimating muscle performance and strength is accessible and cheap with traditional methods, such as gait speed and grip strength, respectively [<xref ref-type="bibr" rid="ref5">5</xref>]. However, techniques such as dual X-ray absorptiometry or body magnetic resonance imaging (MRI) are necessary to accurately assess lean or muscle mass. These methods can increase costs and time and are impractical in settings such as dementia clinics [<xref ref-type="bibr" rid="ref6">6</xref>].</p><p>Dementia patients are highly affected by sarcopenia, with a prevalence of around 60%&#x2010;70% [<xref ref-type="bibr" rid="ref7">7</xref>,<xref ref-type="bibr" rid="ref8">8</xref>]. People living with neurodegenerative diseases are more prone to experience difficulties due to malnutrition, being sedentary, and falling; therefore, having sarcopenia increases the risk of adverse outcomes. Sarcopenia is not only a risk factor for adverse outcomes for those with dementia but also promotes cognitive loss in healthy older adults [<xref ref-type="bibr" rid="ref9">9</xref>]. Therefore, diagnosing sarcopenia in people with neurodegenerative diseases is relevant and necessary.</p><p>Head MRI is a widely used diagnostic method for assessing dementia and Alzheimer disease (AD), as it offers intricate representations of the brain&#x2019;s anatomy and physiology. 
In clinical practice, MRI is often combined with other imaging techniques and cognitive assessments to support the diagnosis of these conditions. The utilization of MRI has seen an upward trend in recent times, as it has become an instrumental tool for the early detection and tracking of the evolution of dementia and AD. According to estimates, 60%&#x2010;80% of patients diagnosed with dementia or AD undergo MRI as part of their diagnostic evaluation [<xref ref-type="bibr" rid="ref6">6</xref>].</p><p>Mastication and deglutition muscles such as the masseter and tongue are visible in brain MRI scans [<xref ref-type="bibr" rid="ref10">10</xref>,<xref ref-type="bibr" rid="ref11">11</xref>]. These muscles can reflect not only age-associated general muscle decline but also systemic processes due to highly complex interactions with the immune system and the inflammatory response, the nervous system, and the crossroads of several components of the frailty syndrome [<xref ref-type="bibr" rid="ref12">12</xref>,<xref ref-type="bibr" rid="ref13">13</xref>]. Indeed, in a previous publication, we have reported that manually segmented masseter predicts mortality and clinical short-term and long-term outcomes in several contexts [<xref ref-type="bibr" rid="ref14">14</xref>]. Moreover, head muscles such as the tongue and masseter could be cost-effective alternatives to estimate muscle mass in dementia and other common conditions such as head cancer, stroke, or cranioencephalic trauma [<xref ref-type="bibr" rid="ref14">14</xref>,<xref ref-type="bibr" rid="ref15">15</xref>]. However, manual and semi-automatic techniques are labor-intensive and time-consuming, making the image processing task for large studies difficult, expensive, and, most importantly, impractical to apply in a clinical setting. 
Therefore, in the present study, we aimed to use MRI scans of the head opportunistically to develop an automated deep learning method to evaluate sarcopenia.</p></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Population and Data Source</title><p>The &#x201C;Dementia Study of Western Norway&#x201D; (DemVest) is a long-term study between 2005 and 2013, with ongoing follow-up assessments. Participants were referred from dementia clinics in Hordaland and Rogaland and were insured by the same national insurance scheme. The study&#x2019;s methodology is described elsewhere [<xref ref-type="bibr" rid="ref15">15</xref>]. Those with moderate or severe dementia, delirium, past bipolar or psychotic conditions, terminal illness, or newly diagnosed somatic diseases impacting cognition, function, or participation were excluded.</p><p>For this study, subjects with dementia with Lewy bodies (DLB) or mild AD who had baseline MRI scans were included. Out of 111 participants (85 AD and 26 DLB), 33 AD and 20 DLB participants with MRI images for brain and muscle measurement were selected based on the quality of the images and clear delineation of the regions of interest. The diagnosis of dementia was made in accordance with the Diagnostic and Statistical Manual of Mental Disorders, Fourth Edition (DSM-IV) criteria, and patients were classified as AD or DLB [<xref ref-type="bibr" rid="ref16">16</xref>]. A mini-mental state examination score of &#x2265;20 or a clinical dementia rating global score of 1 was chosen as the definition for mild dementia. The diagnosis was based on inclusion but could be modified with clinical evolution, consensus, and autopsy [<xref ref-type="bibr" rid="ref15">15</xref>]. Participants were evaluated through structured assessments, and medical records were used to gather complete medical history and comorbidity data. 
In total, 56 participants had pathological diagnoses with 80% accuracy compared to clinical criteria, which reflects a reliable initial clinical diagnosis [<xref ref-type="bibr" rid="ref17">17</xref>].</p><sec id="s2-1-1"><title>Ethical Considerations</title><p>This study was approved by the regional ethics committee (approval code: 2010/633) and the Norwegian authorities for the collection of medical data. All data were handled and kept following national data privacy protocols. All participants signed an informed consent form before inclusion in the study.</p></sec></sec><sec id="s2-2"><title>Imaging</title><p>All images were acquired at baseline. A 1.5-T Philips Intera-scanner was used to obtain MRI images. The acquisition protocol for 3D T1-weighted sequence was as follows: flip angle of 30&#x00B0;, repetition time/echo time of 10.0/4.6 ms, number of excitations of 2, 2-mm slice thickness with 1-mm spacing between the slices (1-mm slices with no gap), matrix of 256&#x00D7;256 pixels, and field of view of 26 cm. Those with movement artifacts and inadequate image quality were removed from the data using visual quality checks. A standardized preprocessing method for harmonizing multiple collections of MRIs was applied, which consisted of movement correction and intensity normalization following previously validated techniques [<xref ref-type="bibr" rid="ref15">15</xref>].</p></sec><sec id="s2-3"><title>Ground Truth Image Segmentation</title><p>Ground truth (GT) images were segmented using interactive pixel techniques made available by SliceOmatic software (TomoVision) following a manual method previously reported [<xref ref-type="bibr" rid="ref18">18</xref>]. The size of the masseter muscle was used as a reference for the selection criteria of the slices. In total, 5 slices were selected from the ones with both right and left masseter muscles at their largest. 
For each slice, 5 tissues were segmented: left and right masseter muscles, left and right subcutaneous fat, and tongue muscle (<xref ref-type="fig" rid="figure1">Figure 1</xref>). The masseter muscle on each side was used as a reference to segment subcutaneous fat.</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>Example of segmented tissues overlaid on the original MRI. (<bold>A</bold>) Right masseter muscle, (<bold>B</bold>) left masseter muscle, (<bold>C</bold>) right subcutaneous fat, (<bold>D</bold>) left subcutaneous fat, and (<bold>E</bold>) tongue muscle.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="aging_v8i1e63686_fig01.png"/></fig></sec><sec id="s2-4"><title>Network Architecture</title><p>We studied and compared 6 different U-Net-like architectures. The original U-Net architecture [<xref ref-type="bibr" rid="ref19">19</xref>] was first designed to segment medical images, and many other researchers have tried to improve its performance by integrating additional techniques into its architecture [<xref ref-type="bibr" rid="ref20">20</xref>]. U-Net consists of a contracting path (encoder) and an expansive path (decoder) with skip connections between these 2 paths. The network learns features from the provided image and the mask at increasingly higher spatial scales by gradually down-sampling to lower resolutions through the encoding path. The expansive path then gradually increases the resolution of the output from the encoding path to the original image size, resulting in a probability map as an output, indicating the chance of each pixel belonging to a specific tissue. 
One important feature of U-Net is its skip connections, which concatenate feature maps from the encoding path to the corresponding block in the expansive path, making it possible to maintain small details crucial in medical image segmentation.</p><p>We have included 5 variants of U-Net in this study, including Attention U-Net, Dense U-Net, Residual U-Net, Inception U-Net, and U-Net++. Attention U-Net is desirable since it allows the model to focus on specific objects and ignore unnecessary areas [<xref ref-type="bibr" rid="ref21">21</xref>]. In Dense U-Net, the traditional U-Net blocks are replaced with a dense block, enabling the model to segment objects with greater distinction. This feature is important in medical practice since tissues are often very close and sometimes overlap [<xref ref-type="bibr" rid="ref22">22</xref>]. Residual U-Net architecture tries to tackle the vanishing gradient issue, a common problem in designing deep neural networks [<xref ref-type="bibr" rid="ref23">23</xref>]. In most cases, the same organ&#x2019;s size can vary between patients, which can cause limitations on the segmentation capability of the model. By using filters with different sizes, Inception U-Net attempts to overcome this problem [<xref ref-type="bibr" rid="ref24">24</xref>]. Lastly, U-Net++ aids the classic U-Net model to more accurately segment images by providing semantic information from a dense network of skip connections as an intermediary grid between the encoding and decoding paths [<xref ref-type="bibr" rid="ref25">25</xref>]. We also included a Wide U-Net architecture to eliminate the effect of increased trainable parameters. This model has the same architecture as U-Net but with more feature maps per layer (30, 60, 120, 240, and 480) compared to the original U-Net (16, 32, 64, 128, and 256). 
Hence, it will serve as a control to compare the models with larger numbers of trainable parameters with the base U-Net (<xref ref-type="table" rid="table1">Table 1</xref>).</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Number of trainable parameters for each model.</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Model</td><td align="left" valign="bottom">Trainable parameters</td></tr></thead><tbody><tr><td align="left" valign="top">U-Net</td><td align="left" valign="top">2,164,390</td></tr><tr><td align="left" valign="top">Attention U-Net</td><td align="left" valign="top">2,233,270</td></tr><tr><td align="left" valign="top">U-Net++</td><td align="left" valign="top">2,555,702</td></tr><tr><td align="left" valign="top">Inception U-Net</td><td align="left" valign="top">5,529,526</td></tr><tr><td align="left" valign="top">Residual U-Net</td><td align="left" valign="top">6,877,110</td></tr><tr><td align="left" valign="top">Dense U-Net</td><td align="left" valign="top">7,666,320</td></tr><tr><td align="left" valign="top">Wide U-Net</td><td align="left" valign="top">7,596,306</td></tr></tbody></table></table-wrap></sec><sec id="s2-5"><title>Training Procedure</title><p>All models were trained with the mini-batch stochastic gradient descent algorithm using the Adam optimizer. A batch size of 8 was selected. The learning rate was 0.0001. The training was done for 200 epochs. The values of hyperparameters were empirically tuned for best performance. Categorical cross-entropy (CSE) was selected as the loss function for this study [<xref ref-type="bibr" rid="ref26">26</xref>]. The CSE loss function minimizes the distance between 2 distributions (the predicted labels and the GT labels). 
CSE is one of the most popular loss functions for image segmentation and has shown excellent performance in muscle segmentation [<xref ref-type="bibr" rid="ref27">27</xref>,<xref ref-type="bibr" rid="ref28">28</xref>]. All experiments were implemented with open-source software: Python (version 3.7.13), TensorFlow (version 2.8.2), and Keras (version 2.8.0).</p></sec><sec id="s2-6"><title>Model Evaluation</title><p>The results of the experiments were evaluated using 2 main measures: Dice similarity coefficient (DSC) and average symmetric surface distance (ASSD). The DSC represents the agreement between the GT labels and predicted labels that models generate:</p><disp-formula id="equWL1"><mml:math id="eqn1"><mml:mtext>DSC</mml:mtext><mml:mfenced separators="|"><mml:mrow><mml:mi>P</mml:mi><mml:mo>,</mml:mo><mml:mi>G</mml:mi></mml:mrow></mml:mfenced><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:mn>2</mml:mn><mml:mo>(</mml:mo><mml:mi>P</mml:mi><mml:mo>&#x2229;</mml:mo><mml:mi>G</mml:mi><mml:mo>)</mml:mo></mml:mrow><mml:mrow><mml:mo>(</mml:mo><mml:mi>P</mml:mi><mml:mo>+</mml:mo><mml:mi>G</mml:mi><mml:mo>)</mml:mo></mml:mrow></mml:mfrac></mml:math></disp-formula><p>where &#x2229; is the intersection and P and G are the 2 labels. DSC ranges between 0 and 1, where 0 indicates no agreement and 1 indicates perfect agreement. In our study, DSC is presented as a percentage.</p><p>The ASSD measures the average distance from pixels on the boundary of predicted labels to corresponding pixels on the boundary of the GT labels:</p><disp-formula id="equWL2"><mml:math id="eqn2"><mml:mtext>ASSD</mml:mtext><mml:mfenced separators="|"><mml:mrow><mml:msub><mml:mrow><mml:mi>B</mml:mi></mml:mrow><mml:mrow><mml:mi>P</mml:mi></mml:mrow></mml:msub><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mi>B</mml:mi></mml:mrow><mml:mrow><mml:mi>G</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mfenced><mml:mo>=</mml:mo><mml:mfrac><mml:mrow><mml:munder><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>p</mml:mi><mml:mo>&#x2208;</mml:mo><mml:msub><mml:mrow><mml:mi>B</mml:mi></mml:mrow><mml:mrow><mml:mi>P</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:munder><mml:mi>d</mml:mi><mml:mfenced separators="|"><mml:mrow><mml:mi>p</mml:mi><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mi>B</mml:mi></mml:mrow><mml:mrow><mml:mi>G</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mfenced><mml:mo>+</mml:mo><mml:munder><mml:mo>&#x2211;</mml:mo><mml:mrow><mml:mi>g</mml:mi><mml:mo>&#x2208;</mml:mo><mml:msub><mml:mrow><mml:mi>B</mml:mi></mml:mrow><mml:mrow><mml:mi>G</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:munder><mml:mi>d</mml:mi><mml:mfenced separators="|"><mml:mrow><mml:mi>g</mml:mi><mml:mo>,</mml:mo><mml:msub><mml:mrow><mml:mi>B</mml:mi></mml:mrow><mml:mrow><mml:mi>P</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mfenced></mml:mrow><mml:mrow><mml:mfenced open="|" close="|" separators="|"><mml:mrow><mml:msub><mml:mrow><mml:mi>B</mml:mi></mml:mrow><mml:mrow><mml:mi>P</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mfenced><mml:mo>+</mml:mo><mml:mfenced open="|" close="|" separators="|"><mml:mrow><mml:msub><mml:mrow><mml:mi>B</mml:mi></mml:mrow><mml:mrow><mml:mi>G</mml:mi></mml:mrow></mml:msub></mml:mrow></mml:mfenced></mml:mrow></mml:mfrac></mml:math></disp-formula><p>where <inline-formula><mml:math id="ieqn1"><mml:msub><mml:mrow><mml:mi>B</mml:mi></mml:mrow><mml:mrow><mml:mi>P</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> and <inline-formula><mml:math id="ieqn2"><mml:msub><mml:mrow><mml:mi>B</mml:mi></mml:mrow><mml:mrow><mml:mi>G</mml:mi></mml:mrow></mml:msub></mml:math></inline-formula> are the boundaries of predicted labels and corresponding reference labels, respectively. 
<inline-formula><mml:math id="ieqn3"><mml:mstyle displaystyle="true" scriptlevel="0"><mml:mrow><mml:mi>d</mml:mi><mml:mrow><mml:mo>(</mml:mo><mml:mrow><mml:mi>v</mml:mi><mml:mo>,</mml:mo><mml:mi>B</mml:mi></mml:mrow><mml:mo>)</mml:mo></mml:mrow></mml:mrow></mml:mstyle></mml:math></inline-formula> is the shortest Euclidean distance between voxel <inline-formula><mml:math id="ieqn4"><mml:mi>v</mml:mi></mml:math></inline-formula> and boundary <inline-formula><mml:math id="ieqn5"><mml:mi>B</mml:mi></mml:math></inline-formula>. An ASSD of 0 indicates a perfect match between predicted and reference labels. The ASSD was measured in mm.</p><p>We used k-fold cross-validation in evaluating the models. This technique splits the data set into k subsets (folds). The deep learning models are trained on all but one of the subsets (k&#x2013;1), and then the models are evaluated on the subset that was not used for training. This process is repeated k times, and the average of the results is reported. We used k=10.</p><p>Additional metrics, including Jaccard coefficient, precision, recall, sensitivity, specificity, and F<sub>1</sub>-score, are presented in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>. The results are shown as mean and standard deviation and median and interquartile range across k-fold (k=10) cross-validation.</p></sec><sec id="s2-7"><title>Statistical Methods</title><p>We explored the association between the BMI and the muscle and fat areas and volumes in the MRI images using individual linear regression models adjusted by sex and age. No adjustments for multiple testing were made. All assumptions were checked. All <italic>P</italic>-values were evaluated at a 5% level. 
The analysis and graphs were carried out using R (version 4.2.2).</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><sec id="s3-1"><title>Segmentation</title><p>DSC and ASSD were used to quantitatively analyze the segmentation results (<xref ref-type="fig" rid="figure2">Figure 2</xref> and <xref ref-type="table" rid="table2">Table 2</xref>). The Dense U-Net model has a higher average DSC and lower ASSD than other models. In contrast, Attention U-Net has the lowest average DSC score compared to other models. This result is confirmed by the higher ASSD for Attention U-Net for all tissues. Other models have shown almost similar results for all areas. Additional metrics have been presented in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>. The results from these metrics confirm the findings of the study mentioned in this section; hence, they were omitted from the main manuscript.</p><fig position="float" id="figure2"><label>Figure 2.</label><caption><p>Box plot of k-fold (k=10) cross-validation results for attention U-Net, dense U-Net, inception U-Net, residual U-Net, U-Net, U-Net++, and wide U-Net. Top: DSC in percentage. Bottom: ASSD in mm.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="aging_v8i1e63686_fig02.png"/></fig><table-wrap id="t2" position="float"><label>Table 2.</label><caption><p>Mean DSC and ASSD for test and validation results for k-fold (k=10) cross-validation. 
Standard deviation for the measurements in this table is presented in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>.</p></caption><table id="table2" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom" rowspan="2"/><td align="left" valign="bottom" rowspan="2">Model</td><td align="left" valign="bottom" colspan="5">Test set</td><td align="left" valign="bottom" colspan="5">Validation set</td></tr><tr><td align="left" valign="bottom">T<sup><xref ref-type="table-fn" rid="table2fn1">a</xref></sup></td><td align="left" valign="bottom">LM<sup><xref ref-type="table-fn" rid="table2fn2">b</xref></sup></td><td align="left" valign="bottom">RM<sup><xref ref-type="table-fn" rid="table2fn3">c</xref></sup></td><td align="left" valign="bottom">LSF<sup><xref ref-type="table-fn" rid="table2fn4">d</xref></sup></td><td align="left" valign="bottom">RSF<sup><xref ref-type="table-fn" rid="table2fn5">e</xref></sup></td><td align="left" valign="bottom">T</td><td align="left" valign="bottom">LM</td><td align="left" valign="bottom">RM</td><td align="left" valign="bottom">LSF</td><td align="left" valign="bottom">RSF</td></tr></thead><tbody><tr><td align="left" valign="top" rowspan="6">DSC<sup><xref ref-type="table-fn" rid="table2fn6">f</xref></sup><break/>(%)</td><td align="left" valign="top">Attention U-Net</td><td align="left" valign="top">93.57</td><td align="left" valign="top">93.4</td><td align="left" valign="top">93.19</td><td align="left" valign="top">90.9</td><td align="left" valign="top">91.26</td><td align="left" valign="top">93.43</td><td align="left" valign="top">93.38</td><td align="left" valign="top">93.23</td><td align="left" valign="top">91.05</td><td align="left" valign="top">91.43</td></tr><tr><td align="left" valign="top">Dense U-Net</td><td align="left" valign="top">94.12</td><td align="left" valign="top">94.66</td><td align="left" valign="top">94.4</td><td align="left" valign="top">91.76</td><td align="left" 
valign="top">92.22</td><td align="left" valign="top">94.07</td><td align="left" valign="top">94.7</td><td align="left" valign="top">94.49</td><td align="left" valign="top">91.89</td><td align="left" valign="top">92.42</td></tr><tr><td align="left" valign="top">Inception U-Net</td><td align="left" valign="top">94.07</td><td align="left" valign="top">94.12</td><td align="left" valign="top">93.69</td><td align="left" valign="top">91.36</td><td align="left" valign="top">91.59</td><td align="left" valign="top">93.85</td><td align="left" valign="top">94.27</td><td align="left" valign="top">93.73</td><td align="left" valign="top">91.45</td><td align="left" valign="top">91.81</td></tr><tr><td align="left" valign="top">Residual U-Net</td><td align="left" valign="top">93.71</td><td align="left" valign="top">94.03</td><td align="left" valign="top">93.53</td><td align="left" valign="top">91.05</td><td align="left" valign="top">91.46</td><td align="left" valign="top">93.74</td><td align="left" valign="top">94.1</td><td align="left" valign="top">93.67</td><td align="left" valign="top">91.35</td><td align="left" valign="top">91.68</td></tr><tr><td align="left" valign="top">U-Net</td><td align="left" valign="top">93.83</td><td align="left" valign="top">93.94</td><td align="left" valign="top">93.46</td><td align="left" valign="top">90.97</td><td align="left" valign="top">91.49</td><td align="left" valign="top">93.88</td><td align="left" valign="top">93.93</td><td align="left" valign="top">93.52</td><td align="left" valign="top">91.18</td><td align="left" valign="top">91.67</td></tr><tr><td align="left" valign="top">Wide U-Net</td><td align="left" valign="top">94</td><td align="left" valign="top">94.29</td><td align="left" valign="top">93.93</td><td align="left" valign="top">91.35</td><td align="left" valign="top">91.68</td><td align="left" valign="top">94.03</td><td align="left" valign="top">94.3</td><td align="left" valign="top">93.99</td><td align="left" 
valign="top">91.59</td><td align="left" valign="top">92.01</td></tr><tr><td align="left" valign="top" rowspan="6">ASSD<sup><xref ref-type="table-fn" rid="table2fn7">g</xref></sup><break/>(mm)</td><td align="left" valign="top">Attention U-Net</td><td align="left" valign="top">1.47</td><td align="left" valign="top">0.63</td><td align="left" valign="top">0.66</td><td align="left" valign="top">0.59</td><td align="left" valign="top">0.62</td><td align="left" valign="top">1.53</td><td align="left" valign="top">0.67</td><td align="left" valign="top">0.65</td><td align="left" valign="top">0.6</td><td align="left" valign="top">0.59</td></tr><tr><td align="left" valign="top">Dense U-Net</td><td align="left" valign="top">1.3</td><td align="left" valign="top">0.52</td><td align="left" valign="top">0.53</td><td align="left" valign="top">0.53</td><td align="left" valign="top">0.54</td><td align="left" valign="top">1.37</td><td align="left" valign="top">0.51</td><td align="left" valign="top">0.53</td><td align="left" valign="top">0.52</td><td align="left" valign="top">0.54</td></tr><tr><td align="left" valign="top">Inception U-Net</td><td align="left" valign="top">1.33</td><td align="left" valign="top">0.57</td><td align="left" valign="top">0.61</td><td align="left" valign="top">0.59</td><td align="left" valign="top">0.61</td><td align="left" valign="top">1.44</td><td align="left" valign="top">0.56</td><td align="left" valign="top">0.63</td><td align="left" valign="top">0.6</td><td align="left" valign="top">0.59</td></tr><tr><td align="left" valign="top">Residual U-Net</td><td align="left" valign="top">1.39</td><td align="left" valign="top">0.58</td><td align="left" valign="top">0.62</td><td align="left" valign="top">0.61</td><td align="left" valign="top">0.61</td><td align="left" valign="top">1.38</td><td align="left" valign="top">0.58</td><td align="left" valign="top">0.63</td><td align="left" valign="top">0.58</td><td align="left" valign="top">0.6</td></tr><tr><td align="left" 
valign="top">U-Net</td><td align="left" valign="top">1.41</td><td align="left" valign="top">0.6</td><td align="left" valign="top">0.65</td><td align="left" valign="top">0.62</td><td align="left" valign="top">0.61</td><td align="left" valign="top">1.45</td><td align="left" valign="top">0.6</td><td align="left" valign="top">0.64</td><td align="left" valign="top">0.61</td><td align="left" valign="top">0.59</td></tr><tr><td align="left" valign="top">Wide U-Net</td><td align="left" valign="top">1.35</td><td align="left" valign="top">0.55</td><td align="left" valign="top">0.58</td><td align="left" valign="top">0.56</td><td align="left" valign="top">0.6</td><td align="left" valign="top">1.34</td><td align="left" valign="top">0.55</td><td align="left" valign="top">0.58</td><td align="left" valign="top">0.56</td><td align="left" valign="top">0.57</td></tr></tbody></table><table-wrap-foot><fn id="table2fn1"><p><sup>a</sup>T: tongue muscle.</p></fn><fn id="table2fn2"><p><sup>b</sup>LM: left masseter muscle.</p></fn><fn id="table2fn3"><p><sup>c</sup>RM: right masseter muscle.</p></fn><fn id="table2fn4"><p><sup>d</sup>LSF: left subcutaneous fat.</p></fn><fn id="table2fn5"><p><sup>e</sup>RSF: right subcutaneous fat.</p></fn><fn id="table2fn6"><p><sup>f</sup>DSC: Dice similarity coefficient.</p></fn><fn id="table2fn7"><p><sup>g</sup>ASSD: average symmetric surface distance.</p></fn></table-wrap-foot></table-wrap></sec><sec id="s3-2"><title>Clinical Validation</title><p>To prove the clinical validity of the measurements, we evaluated the association between the segmented muscles and subcutaneous fat and BMI. We found a significant positive association between tongue muscle, left masseter muscle, and left and right subcutaneous fat and BMI (<xref ref-type="fig" rid="figure3">Figure 3</xref>). The area of a single slice as well as the volume of 5 slices per patient was calculated for this experiment. 
The results were adjusted by age and sex with <italic>P</italic>&#x003C;.05 (<xref ref-type="supplementary-material" rid="app2">Multimedia Appendix 2</xref>).</p><fig position="float" id="figure3"><label>Figure 3.</label><caption><p>Individual linear regression representing the relationship between quantitative results (area of a single slice and volume of 5 slices) from the Dense U-Net model and BMI.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="aging_v8i1e63686_fig03.png"/></fig></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><p>In this study, we evaluated the performance of six deep learning models for segmentation of the masseter muscles, subcutaneous fat, and tongue muscle in MRI images of the head. Several variations of the U-Net architecture were trained and tested using k-fold cross-validation. The use of deep neural networks for segmenting musculoskeletal tissues in patients with AD and LBD is a novel experimental contribution to the deep learning-based segmentation literature as well as the clinical literature.</p><p>Our study demonstrated that the Dense U-Net model performed superiorly to the other models in all evaluated regions by achieving an overall DSC of 93.43% and ASSD of 0.68 mm on the test data. The remaining models demonstrated comparable outcomes, except the Attention U-Net, which achieved slightly less accurate results in all regions with an overall DSC of 92.46% and ASSD of 0.79 mm on the test data. Notably, despite having a comparable number of trainable parameters, the Dense U-Net model demonstrated a higher DSC and lower ASSD than the Wide U-Net model. 
The Dense U-Net model achieves superior performance with a similar number of parameters, suggesting that its architectural efficiency, rather than parameter quantity alone, drives this improvement.</p><p>The validation set produced similar results to the training set, indicating that the trained models did not suffer from overfitting. Although we applied data augmentation to the training data set (results not shown), it did not significantly improve the accuracy of the segmentation models.</p><p>Furthermore, we observed a significant correlation between the results and BMI, a well-established measure of nutrition and body composition. This underscores the validity and clinical relevance of this method. If these localized muscle measurements correlate with BMI, it suggests that they may reflect broader nutritional and body composition states.</p><p>Sarcopenia is a relatively newly recognized condition for which neuromuscular degeneration, central nervous system alpha motor unit loss, and fat infiltration into muscle are the most distinctive proven and observed pathogenic features, leading to loss of muscle mass and strength and predisposing to physical frailty [<xref ref-type="bibr" rid="ref2">2</xref>]. We have proposed that non-invasive assessment of intermuscular adipose tissue and muscle mass by image analysis could constitute a viable method to diagnose sarcopenia and predict its associated outcomes, the clinical impact of which is also under study by our group [<xref ref-type="bibr" rid="ref29">29</xref>].</p><p>The validity and clinical implications of measuring the masseter muscle have been shown in previous studies [<xref ref-type="bibr" rid="ref30">30</xref>,<xref ref-type="bibr" rid="ref31">31</xref>]. In recent work by our group, we compared the diagnostic capacity for sarcopenia between the gold-standard dual X-ray absorptiometry and our measurements of head muscles. 
The results showed that both methods had equivalent accuracy [<xref ref-type="bibr" rid="ref32">32</xref>]. In older adults with glioblastoma, a decreased masseter diameter on preoperative imaging was associated with shorter overall survival and 90-day mortality after surgical resection [<xref ref-type="bibr" rid="ref33">33</xref>]. In addition, low masseter muscle was significantly associated with worse overall survival in patients aged 65 years or older, diagnosed with squamous cell carcinoma of the head and neck and treated with curative intent [<xref ref-type="bibr" rid="ref34">34</xref>]. Another study evaluated post-operative results after carotid endarterectomy; low masseter mass was associated with a prolonged hospital stay and recurrent stroke within 5 years [<xref ref-type="bibr" rid="ref35">35</xref>]. In another study, preoperative masseter mass was a predictor of postoperative pneumonia in patients with esophageal cancer [<xref ref-type="bibr" rid="ref36">36</xref>]. Additionally, other studies have shown that the masseter muscle can be used as a nutritional biomarker. The masseter muscle, analyzed via computed tomography (CT) anthropometry, showed a statistically significant association with systemic nutritional biomarkers [<xref ref-type="bibr" rid="ref37">37</xref>]. On the other hand, the tongue has been shown to be a good marker of prognosis, as tongue strength has been shown to be helpful in diagnosing sarcopenia [<xref ref-type="bibr" rid="ref38">38</xref>]. Previous studies from our group also report that tongue muscle volume is correlated with malnutrition and even brain structures in patients with dementia [<xref ref-type="bibr" rid="ref18">18</xref>,<xref ref-type="bibr" rid="ref29">29</xref>].</p><p>Therefore, the approach we present in this paper can be opportunistically used to quantify muscle volumes and investigate the implications of having low muscle mass in the masseter or the tongue in people with brain, head, and neck diseases. 
Thus, it is an important first step toward developing a more efficient method to estimate masseter and tongue muscle with a better capacity to be implemented in clinical practice. Manual and semi-automatic techniques have been employed in several studies for masseter muscle segmentation in MRI [<xref ref-type="bibr" rid="ref39">39</xref>]. A recent study used shape determination to segment the masseter muscle in MRI images [<xref ref-type="bibr" rid="ref40">40</xref>]. In this approach, a manual contour for 8 slices must be defined by the user, and the model then determines the shape for the remaining scans, reducing the time and labor required for segmentation. However, this technique still requires manual segmentation, which can be time-consuming and prone to user error compared to our approach.</p><p>Model-based techniques have also been explored for the segmentation of the mastication muscle [<xref ref-type="bibr" rid="ref41">41</xref>,<xref ref-type="bibr" rid="ref42">42</xref>]. Although these techniques have shown high accuracy (&#x003E;90%), there must be a distinct boundary between the masseter muscle and surrounding tissues to ensure accuracy. This distinct boundary refers to visible differences in intensity, texture, or anatomical structure on imaging, which enable the models to accurately separate the muscle from adjacent tissues, such as fat or bone.</p><p>Machine and deep learning approaches have been widely used to segment muscles in various body parts. CT scans and cone beam CT scans have primarily been investigated for measuring the head organs, including masseter muscle using these techniques [<xref ref-type="bibr" rid="ref43">43</xref>-<xref ref-type="bibr" rid="ref45">45</xref>]. In a previous study, a basic U-Net model was applied to segment the masseter muscle in head CT scans to investigate hemifacial microsomia [<xref ref-type="bibr" rid="ref46">46</xref>]. 
The mean DSC reported in that study was 79.4% for the experimental group and 82.4% for the group with mandible deformities, which are lower compared to the results obtained in our study. In a study using CT scan of the head for segmentation of masticatory muscles, deep learning techniques were superior to atlas-based techniques, achieving a mean DSC of 83% [<xref ref-type="bibr" rid="ref47">47</xref>]. To the best of our knowledge, the techniques considered in our study have not yet been used to segment musculoskeletal tissues in MRI images of the head.</p><p>Our study had some limitations. The Attention U-Net model demonstrated the lowest DSC values and the highest ASSD values among the evaluated models, indicating suboptimal segmentation accuracy across tissues. This underperformance may be attributed to the Attention mechanism&#x2019;s inability to effectively focus on the target tissue, leading to dispersed attention, particularly in smaller structures or regions with indistinct boundaries. To address this limitation, future work could involve refining the Attention mechanism to enhance its specificity and focus on regions of interest. Alternatively, exploring models that prioritize multi-scale feature extraction and detail preservation may provide improved segmentation performance, particularly for small or complex anatomical regions.</p><p>The DemVest study had some limitations that may have impacted the results. Selection bias might have been present if patients with more complex health conditions were included, as primary care referrals were used. The study was not specifically designed for the paper&#x2019;s purpose, which could limit the data analysis and interaction control. For example, the absence of a healthy control group prevents us from determining whether the observed muscle volume characteristics are specific to individuals with AD or neurocognitive disorders, or if they represent normal variations associated with aging. 
However, the primary objective of this study was to demonstrate the feasibility and accuracy of our deep learning model in quantifying muscle volumes using head MRI, rather than to establish definitive differences between diseased and healthy populations. In addition, the sample size is relatively small, and there is a risk that our results may not generalize to larger populations. Additionally, the MRI scans were obtained from a single center using a single MRI machine, which may affect the model&#x2019;s generalizability. Longitudinal analyses were not conducted because imaging was only performed at baseline, in line with the initial primary objectives of the DemVest study and the resources allocated for image acquisition.</p><p>These limitations should be considered when interpreting this study&#x2019;s results and addressed in future studies seeking broader application of the proposed approach. Whether masseter and tongue muscle volumes correlate with lean body mass and inflammaging and could be predictors of neurocognitive conditions and their associated outcomes remains unknown.</p><p>On the other hand, this study has several strengths, including well-characterized data, detailed and exhaustive diagnosis to correctly identify people with dementia, and an automated quality control and analysis pipeline. Additionally, the results fill a gap in the literature and provide insights into a possible method to efficiently diagnose sarcopenia in contexts where a head MRI is already available.</p><p>In summary, to our knowledge, this is the first study that validates deep learning methods that could be easily implemented in clinical practice to measure masseter and tongue muscle volumes with a solid potential to become biomarkers with strong predictive value for adverse outcomes in older persons with dementia. Since imaging is widely used in memory clinics worldwide, this opportunistic approach to image analyses could become standard practice in those settings. 
However, further large longitudinal studies are still required.</p></sec></body><back><ack><p>We want to thank the participants, researchers, and technical staff who made the DemVest study possible, as well as the staff and facilities provided by the Centre for Age-Related Medicine (SESAM) at Stavanger, Norway. This work was supported by the Norwegian government, through hospital owner Helse Vest (Western Norway Regional Health Authority) number 911973. It is also funded by the National Institute for Health Research (NIHR) Biomedical Research Centre at South London and Maudsley NHS Foundation Trust, King's College London, and the Australian Institute for Musculoskeletal Science (AIMSS). Mahdi Imani is supported by an Australian Government Research Training Program (RTP) Scholarship.</p></ack><fn-group><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1">AD</term><def><p>Alzheimer disease</p></def></def-item><def-item><term id="abb2">ASSD</term><def><p>average symmetric surface distance</p></def></def-item><def-item><term id="abb3">CSE</term><def><p>categorical cross-entropy</p></def></def-item><def-item><term id="abb4">CT</term><def><p>computed tomography</p></def></def-item><def-item><term id="abb5">DLB</term><def><p>dementia with Lewy bodies</p></def></def-item><def-item><term id="abb6">DSC</term><def><p>Dice similarity coefficient</p></def></def-item><def-item><term id="abb7">DSM-IV</term><def><p>Diagnostic and Statistical Manual of Mental Disorders, Fourth Edition</p></def></def-item><def-item><term id="abb8">GT</term><def><p>ground truth</p></def></def-item><def-item><term id="abb9">MRI</term><def><p>magnetic resonance imaging</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation citation-type="report"><person-group person-group-type="author"><name 
name-style="western"><surname>WHO</surname><given-names>G</given-names> </name></person-group><article-title>WHO Methods and Data Sources for Global Burden of Disease Estimates 2000&#x2013; 2011</article-title><year>2020</year><publisher-name>Department of Health Statistics and Information Systems</publisher-name></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cruz-Jentoft</surname><given-names>AJ</given-names> </name><name name-style="western"><surname>Bahat</surname><given-names>G</given-names> </name><name name-style="western"><surname>Bauer</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Sarcopenia: revised European consensus on definition and diagnosis</article-title><source>Age Ageing</source><year>2019</year><month>01</month><day>1</day><volume>48</volume><issue>1</issue><fpage>16</fpage><lpage>31</lpage><pub-id pub-id-type="doi">10.1093/ageing/afy169</pub-id><pub-id pub-id-type="medline">30312372</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Shafiee</surname><given-names>G</given-names> </name><name name-style="western"><surname>Keshtkar</surname><given-names>A</given-names> </name><name name-style="western"><surname>Soltani</surname><given-names>A</given-names> </name><name name-style="western"><surname>Ahadi</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Larijani</surname><given-names>B</given-names> </name><name name-style="western"><surname>Heshmat</surname><given-names>R</given-names> </name></person-group><article-title>Prevalence of sarcopenia in the world: a systematic review and meta- analysis of general population studies</article-title><source>J Diabetes Metab Disord</source><year>2017</year><volume>16</volume><fpage>21</fpage><pub-id 
pub-id-type="doi">10.1186/s40200-017-0302-x</pub-id><pub-id pub-id-type="medline">28523252</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cruz-Jentoft</surname><given-names>AJ</given-names> </name><name name-style="western"><surname>Landi</surname><given-names>F</given-names> </name><name name-style="western"><surname>Schneider</surname><given-names>SM</given-names> </name><etal/></person-group><article-title>Prevalence of and interventions for sarcopenia in ageing adults: a systematic review. Report of the International Sarcopenia Initiative (EWGSOP and IWGS)</article-title><source>Age Ageing</source><year>2014</year><month>11</month><volume>43</volume><issue>6</issue><fpage>748</fpage><lpage>759</lpage><pub-id pub-id-type="doi">10.1093/ageing/afu115</pub-id><pub-id pub-id-type="medline">25241753</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Morley</surname><given-names>JE</given-names> </name></person-group><article-title>Sarcopenia: diagnosis and treatment</article-title><source>J Nutr Health Aging</source><year>2008</year><volume>12</volume><issue>7</issue><fpage>452</fpage><lpage>456</lpage><pub-id pub-id-type="doi">10.1007/BF02982705</pub-id><pub-id pub-id-type="medline">18615226</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tan</surname><given-names>LF</given-names> </name><name name-style="western"><surname>Lim</surname><given-names>ZY</given-names> </name><name name-style="western"><surname>Choe</surname><given-names>R</given-names> </name><name name-style="western"><surname>Seetharaman</surname><given-names>S</given-names> </name><name 
name-style="western"><surname>Merchant</surname><given-names>R</given-names> </name></person-group><article-title>Screening for frailty and sarcopenia among older persons in medical outpatient clinics and its associations with healthcare burden</article-title><source>J Am Med Dir Assoc</source><year>2017</year><month>07</month><day>1</day><volume>18</volume><issue>7</issue><fpage>583</fpage><lpage>587</lpage><pub-id pub-id-type="doi">10.1016/j.jamda.2017.01.004</pub-id><pub-id pub-id-type="medline">28242192</pub-id></nlm-citation></ref><ref id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cobo</surname><given-names>CS</given-names> </name><name name-style="western"><surname>P&#x00E9;rez</surname><given-names>V</given-names> </name><name name-style="western"><surname>Hermosilla</surname><given-names>C</given-names> </name><name name-style="western"><surname>Nu&#x00F1;ez</surname><given-names>M</given-names> </name><name name-style="western"><surname>Lorena</surname><given-names>PD</given-names> </name></person-group><article-title>Prevalence of sarcopenia in elderly with dementia institutionalized: a multicenter study</article-title><source>J Aging Res</source><year>2014</year><volume>3</volume><fpage>178</fpage><lpage>181</lpage><pub-id pub-id-type="doi">10.14283/jarcp.2014.31</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sperlich</surname><given-names>E</given-names> </name><name name-style="western"><surname>Fleiner</surname><given-names>T</given-names> </name><name name-style="western"><surname>Zijlstra</surname><given-names>W</given-names> </name><name name-style="western"><surname>Haussermann</surname><given-names>P</given-names> </name><name name-style="western"><surname>Morat</surname><given-names>T</given-names> 
</name></person-group><article-title>Sarcopenia in geriatric psychiatry: feasibility of the diagnostic process and estimation of prevalence within a hospital context</article-title><source>J Cachexia Sarcopenia Muscle</source><year>2021</year><month>10</month><volume>12</volume><issue>5</issue><fpage>1153</fpage><lpage>1160</lpage><pub-id pub-id-type="doi">10.1002/jcsm.12748</pub-id><pub-id pub-id-type="medline">34151538</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Peng</surname><given-names>TC</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>WL</given-names> </name><name name-style="western"><surname>Wu</surname><given-names>LW</given-names> </name><name name-style="western"><surname>Chang</surname><given-names>YW</given-names> </name><name name-style="western"><surname>Kao</surname><given-names>TW</given-names> </name></person-group><article-title>Sarcopenia and cognitive impairment: a systematic review and meta-analysis</article-title><source>Clin Nutr</source><year>2020</year><month>09</month><volume>39</volume><issue>9</issue><fpage>2695</fpage><lpage>2701</lpage><pub-id pub-id-type="doi">10.1016/j.clnu.2019.12.014</pub-id><pub-id pub-id-type="medline">31917049</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>van Spronsen</surname><given-names>PH</given-names> </name><name name-style="western"><surname>Weijs</surname><given-names>WA</given-names> </name><name name-style="western"><surname>Valk</surname><given-names>J</given-names> </name><name name-style="western"><surname>Prahl-Andersen</surname><given-names>B</given-names> </name><name name-style="western"><surname>van Ginkel</surname><given-names>FC</given-names> </name></person-group><article-title>Comparison of 
jaw-muscle bite-force cross-sections obtained by means of magnetic resonance imaging and high-resolution CT scanning</article-title><source>J Dent Res</source><year>1989</year><month>12</month><volume>68</volume><issue>12</issue><fpage>1765</fpage><lpage>1770</lpage><pub-id pub-id-type="doi">10.1177/00220345890680120901</pub-id><pub-id pub-id-type="medline">2600258</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lee</surname><given-names>YH</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>KM</given-names> </name><name name-style="western"><surname>Auh</surname><given-names>QS</given-names> </name></person-group><article-title>MRI-based assessment of masticatory muscle changes in TMD patients after Whiplash injury</article-title><source>J Clin Med</source><year>2021</year><month>04</month><day>1</day><volume>10</volume><issue>7</issue><fpage>1404</fpage><pub-id pub-id-type="doi">10.3390/jcm10071404</pub-id><pub-id pub-id-type="medline">33915742</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tay</surname><given-names>L</given-names> </name><name name-style="western"><surname>Ding</surname><given-names>YY</given-names> </name><name name-style="western"><surname>Leung</surname><given-names>BP</given-names> </name><etal/></person-group><article-title>Sex-specific differences in risk factors for sarcopenia amongst community-dwelling older adults</article-title><source>Age (Dordr)</source><year>2015</year><month>12</month><volume>37</volume><issue>6</issue><fpage>121</fpage><pub-id pub-id-type="doi">10.1007/s11357-015-9860-3</pub-id><pub-id pub-id-type="medline">26607157</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group 
person-group-type="author"><name name-style="western"><surname>Fung</surname><given-names>FY</given-names> </name><name name-style="western"><surname>Koh</surname><given-names>YLE</given-names> </name><name name-style="western"><surname>Malhotra</surname><given-names>R</given-names> </name><etal/></person-group><article-title>Prevalence of and factors associated with sarcopenia among multi-ethnic ambulatory older Asians with type 2 diabetes mellitus in a primary care setting</article-title><source>BMC Geriatr</source><year>2019</year><month>04</month><day>29</day><volume>19</volume><issue>1</issue><fpage>122</fpage><pub-id pub-id-type="doi">10.1186/s12877-019-1137-8</pub-id><pub-id pub-id-type="medline">31035928</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>van Heusden</surname><given-names>HC</given-names> </name><name name-style="western"><surname>Chargi</surname><given-names>N</given-names> </name><name name-style="western"><surname>Dankbaar</surname><given-names>JW</given-names> </name><name name-style="western"><surname>Smid</surname><given-names>EJ</given-names> </name><name name-style="western"><surname>de Bree</surname><given-names>R</given-names> </name></person-group><article-title>Masseter muscle parameters can function as an alternative for skeletal muscle mass assessments on cross-sectional imaging at lumbar or cervical vertebral levels</article-title><source>Quant Imaging Med Surg</source><year>2022</year><month>01</month><volume>12</volume><issue>1</issue><fpage>15</fpage><lpage>27</lpage><pub-id pub-id-type="doi">10.21037/qims-21-43</pub-id><pub-id pub-id-type="medline">34993057</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Aarsland</surname><given-names>D</given-names> </name><name 
name-style="western"><surname>Rongve</surname><given-names>A</given-names> </name><name name-style="western"><surname>Nore</surname><given-names>SP</given-names> </name><etal/></person-group><article-title>Frequency and case identification of dementia with Lewy bodies using the revised consensus criteria</article-title><source>Dement Geriatr Cogn Disord</source><year>2008</year><volume>26</volume><issue>5</issue><fpage>445</fpage><lpage>452</lpage><pub-id pub-id-type="doi">10.1159/000165917</pub-id><pub-id pub-id-type="medline">18974647</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bell</surname><given-names>CC</given-names> </name></person-group><article-title>DSM-IV: Diagnostic and Statistical Manual of Mental Disorders</article-title><source>J Am Med Assoc</source><year>1994</year><month>09</month><day>14</day><volume>272</volume><issue>10</issue><fpage>828</fpage><pub-id pub-id-type="doi">10.1001/jama.1994.03520100096046</pub-id><pub-id pub-id-type="medline">7933395</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Skogseth</surname><given-names>R</given-names> </name><name name-style="western"><surname>Hortob&#x00E1;gyi</surname><given-names>T</given-names> </name><name name-style="western"><surname>Soennesyn</surname><given-names>H</given-names> </name><etal/></person-group><article-title>Accuracy of clinical diagnosis of dementia with Lewy bodies versus neuropathology</article-title><source>J Alzheimers Dis</source><year>2017</year><volume>59</volume><issue>4</issue><fpage>1139</fpage><lpage>1152</lpage><pub-id pub-id-type="doi">10.3233/JAD-170274</pub-id><pub-id pub-id-type="medline">28731443</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><person-group 
person-group-type="author"><name name-style="western"><surname>Borda</surname><given-names>MG</given-names> </name><name name-style="western"><surname>Bani Hassan</surname><given-names>E</given-names> </name><name name-style="western"><surname>Weon</surname><given-names>JH</given-names> </name><etal/></person-group><article-title>Muscle volume and intramuscular fat of the tongue evaluated with MRI predict malnutrition in people living with dementia: a 5-year follow-up study</article-title><source>J Gerontol A Biol Sci Med Sci</source><year>2022</year><month>02</month><day>3</day><volume>77</volume><issue>2</issue><fpage>228</fpage><lpage>234</lpage><pub-id pub-id-type="doi">10.1093/gerona/glab224</pub-id><pub-id pub-id-type="medline">34338751</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Falk</surname><given-names>T</given-names> </name><name name-style="western"><surname>Mai</surname><given-names>D</given-names> </name><name name-style="western"><surname>Bensch</surname><given-names>R</given-names> </name><etal/></person-group><article-title>U-Net: deep learning for cell counting, detection, and morphometry</article-title><source>Nat Methods</source><year>2019</year><month>01</month><volume>16</volume><issue>1</issue><fpage>67</fpage><lpage>70</lpage><pub-id pub-id-type="doi">10.1038/s41592-018-0261-2</pub-id><pub-id pub-id-type="medline">30559429</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="confproc"><person-group person-group-type="editor"><name name-style="western"><surname>Ronneberger</surname><given-names>O</given-names> </name><name name-style="western"><surname>Fischer</surname><given-names>P</given-names> </name><name name-style="western"><surname>Brox</surname><given-names>T</given-names> </name></person-group><article-title>U-net: convolutional networks for biomedical image 
segmentation</article-title><conf-name>Medical Image Computing and Computer-Assisted Intervention&#x2013;MICCAI 2015: 18th International Conference</conf-name><conf-date>Oct 5-9, 2015</conf-date><conf-loc>Munich, Germany</conf-loc></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="preprint"><person-group person-group-type="author"><name name-style="western"><surname>Oktay</surname><given-names>O</given-names> </name><name name-style="western"><surname>Schlemper</surname><given-names>J</given-names> </name><name name-style="western"><surname>Folgoc</surname><given-names>LL</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>M</given-names> </name><name name-style="western"><surname>Heinrich</surname><given-names>M</given-names> </name><name name-style="western"><surname>Misawa</surname><given-names>K</given-names> </name><etal/></person-group><article-title>Attention U-Net: learning where to look for the pancreas</article-title><source>arXiv</source><comment>Preprint posted online on 2018</comment><comment>arXiv:180403999</comment></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cai</surname><given-names>S</given-names> </name><name name-style="western"><surname>Tian</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Lui</surname><given-names>H</given-names> </name><name name-style="western"><surname>Zeng</surname><given-names>H</given-names> </name><name name-style="western"><surname>Wu</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>G</given-names> </name></person-group><article-title>Dense-UNet: a novel multiphoton in vivo cellular image segmentation model based on a convolutional neural network</article-title><source>Quant Imaging Med 
Surg</source><year>2020</year><month>06</month><volume>10</volume><issue>6</issue><fpage>1275</fpage><lpage>1285</lpage><pub-id pub-id-type="doi">10.21037/qims-19-1090</pub-id><pub-id pub-id-type="medline">32550136</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zhang</surname><given-names>ZX</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>QJ</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>YH</given-names> </name></person-group><article-title>Road extraction by deep residual U-Net</article-title><source>IEEE Geosci Remote Sensing Lett</source><year>2018</year><month>05</month><volume>15</volume><issue>5</issue><fpage>749</fpage><lpage>753</lpage><pub-id pub-id-type="doi">10.1109/LGRS.2018.2802944</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Punn</surname><given-names>NS</given-names> </name><name name-style="western"><surname>Agarwal</surname><given-names>S</given-names> </name></person-group><article-title>Inception U-Net architecture for semantic segmentation to identify nuclei in microscopy cell images</article-title><source>ACM Trans Multimedia Comput Commun Appl</source><year>2020</year><month>02</month><day>29</day><volume>16</volume><issue>1</issue><fpage>1</fpage><lpage>15</lpage><pub-id pub-id-type="doi">10.1145/3376922</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zhou</surname><given-names>Z</given-names> </name><name name-style="western"><surname>Siddiquee</surname><given-names>MMR</given-names> </name><name name-style="western"><surname>Tajbakhsh</surname><given-names>N</given-names> </name><name 
name-style="western"><surname>Liang</surname><given-names>J</given-names> </name></person-group><article-title>UNet++: redesigning skip connections to exploit multiscale features in image segmentation</article-title><source>IEEE Trans Med Imaging</source><year>2020</year><month>06</month><volume>39</volume><issue>6</issue><fpage>1856</fpage><lpage>1867</lpage><pub-id pub-id-type="doi">10.1109/TMI.2019.2959609</pub-id><pub-id pub-id-type="medline">31841402</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="book"><person-group person-group-type="author"><name name-style="western"><surname>Zhang</surname><given-names>ZL</given-names> </name><name name-style="western"><surname>Sabuncu</surname><given-names>MR</given-names> </name></person-group><article-title>Generalized cross entropy loss for training deep neural networks with noisy labels</article-title><source>Adv Neural Inf Process Syst</source><year>2018</year><volume>31</volume><pub-id pub-id-type="medline">000461852003034</pub-id><pub-id pub-id-type="other">9781510884472</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Garcia-Garcia</surname><given-names>A</given-names> </name><name name-style="western"><surname>Orts-Escolano</surname><given-names>S</given-names> </name><name name-style="western"><surname>Oprea</surname><given-names>S</given-names> </name><name name-style="western"><surname>Villena-Martinez</surname><given-names>V</given-names> </name><name name-style="western"><surname>Martinez-Gonzalez</surname><given-names>P</given-names> </name><name name-style="western"><surname>Garcia-Rodriguez</surname><given-names>J</given-names> </name></person-group><article-title>A survey on deep learning techniques for image and video semantic segmentation</article-title><source>Appl Soft 
Comput</source><year>2018</year><month>09</month><volume>70</volume><fpage>41</fpage><lpage>65</lpage><pub-id pub-id-type="doi">10.1016/j.asoc.2018.05.018</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Aldoj</surname><given-names>N</given-names> </name><name name-style="western"><surname>Biavati</surname><given-names>F</given-names> </name><name name-style="western"><surname>Michallek</surname><given-names>F</given-names> </name><name name-style="western"><surname>Stober</surname><given-names>S</given-names> </name><name name-style="western"><surname>Dewey</surname><given-names>M</given-names> </name></person-group><article-title>Automatic prostate and prostate zones segmentation of magnetic resonance images using DenseNet-like U-net</article-title><source>Sci Rep</source><year>2020</year><month>08</month><day>31</day><volume>10</volume><issue>1</issue><fpage>14315</fpage><pub-id pub-id-type="doi">10.1038/s41598-020-71080-0</pub-id><pub-id pub-id-type="medline">32868836</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Borda</surname><given-names>MG</given-names> </name><name name-style="western"><surname>Castellanos-Perilla</surname><given-names>N</given-names> </name><name name-style="western"><surname>Tovar-Rios</surname><given-names>DA</given-names> </name><name name-style="western"><surname>Ferreira</surname><given-names>D</given-names> </name><name name-style="western"><surname>Duque</surname><given-names>G</given-names> </name><name name-style="western"><surname>Aarsland</surname><given-names>D</given-names> </name></person-group><article-title>Tongue muscle mass is associated with total grey matter and hippocampal volumes in Dementia with Lewy Bodies</article-title><source>Arch Gerontol 
Geriatr</source><year>2022</year><volume>100</volume><fpage>104647</fpage><pub-id pub-id-type="doi">10.1016/j.archger.2022.104647</pub-id><pub-id pub-id-type="medline">35134612</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Uhlich</surname><given-names>R</given-names> </name><name name-style="western"><surname>Hu</surname><given-names>P</given-names> </name></person-group><article-title>Sarcopenia diagnosed using masseter muscle area predictive of early mortality following severe traumatic brain injury</article-title><source>Neural Regen Res</source><year>2018</year><month>12</month><volume>13</volume><issue>12</issue><fpage>2089</fpage><lpage>2090</lpage><pub-id pub-id-type="doi">10.4103/1673-5374.241451</pub-id><pub-id pub-id-type="medline">30323130</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wallace</surname><given-names>JD</given-names> </name><name name-style="western"><surname>Calvo</surname><given-names>RY</given-names> </name><name name-style="western"><surname>Lewis</surname><given-names>PR</given-names> </name><etal/></person-group><article-title>Sarcopenia as a predictor of mortality in elderly blunt trauma patients: comparing the masseter to the psoas using computed tomography</article-title><source>J Trauma Acute Care Surg</source><year>2017</year><month>01</month><volume>82</volume><issue>1</issue><fpage>65</fpage><lpage>72</lpage><pub-id pub-id-type="doi">10.1097/TA.0000000000001297</pub-id><pub-id pub-id-type="medline">27820555</pub-id></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Borda</surname><given-names>MG</given-names> </name><name 
name-style="western"><surname>Duque</surname><given-names>G</given-names> </name><name name-style="western"><surname>P&#x00E9;rez-Zepeda</surname><given-names>MU</given-names> </name><etal/></person-group><article-title>Using magnetic resonance imaging to measure head muscles: an innovative method to opportunistically determine muscle mass and detect sarcopenia</article-title><source>J Cachexia Sarcopenia Muscle</source><year>2024</year><month>02</month><volume>15</volume><issue>1</issue><fpage>189</fpage><lpage>197</lpage><pub-id pub-id-type="doi">10.1002/jcsm.13362</pub-id><pub-id pub-id-type="medline">38050325</pub-id></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Morshed</surname><given-names>RA</given-names> </name><name name-style="western"><surname>Young</surname><given-names>JS</given-names> </name><name name-style="western"><surname>Casey</surname><given-names>M</given-names> </name><etal/></person-group><article-title>Sarcopenia diagnosed using masseter muscle diameter as a survival correlate in elderly patients with glioblastoma</article-title><source>World Neurosurg</source><year>2022</year><month>05</month><volume>161</volume><fpage>e448</fpage><lpage>e463</lpage><pub-id pub-id-type="doi">10.1016/j.wneu.2022.02.038</pub-id><pub-id pub-id-type="medline">35181534</pub-id></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>McGoldrick</surname><given-names>D</given-names> </name><name name-style="western"><surname>Alsabbagh</surname><given-names>AY</given-names> </name><name name-style="western"><surname>Pettit</surname><given-names>L</given-names> </name><name name-style="western"><surname>Shaikh</surname><given-names>M</given-names> </name><name name-style="western"><surname>Bhatia</surname><given-names>S</given-names> 
</name></person-group><article-title>Masseter muscle defined sarcopenia and survival in head and neck cancer patients at a single cancer centre</article-title><source>Br J Oral Maxillofac Surg</source><year>2020</year><month>12</month><volume>58</volume><issue>10</issue><fpage>e157</fpage><lpage>e158</lpage><pub-id pub-id-type="doi">10.1016/j.bjoms.2020.10.073</pub-id></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hogenbirk</surname><given-names>RNM</given-names> </name><name name-style="western"><surname>Banning</surname><given-names>LBD</given-names> </name><name name-style="western"><surname>Visser</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Association between masseter muscle area and thickness and outcome after carotid endarterectomy: a retrospective cohort study</article-title><source>J Clin Med</source><year>2022</year><month>05</month><day>30</day><volume>11</volume><issue>11</issue><fpage>3087</fpage><pub-id pub-id-type="doi">10.3390/jcm11113087</pub-id><pub-id pub-id-type="medline">35683474</pub-id></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kamada</surname><given-names>T</given-names> </name><name name-style="western"><surname>Ohdaira</surname><given-names>H</given-names> </name><name name-style="western"><surname>Ito</surname><given-names>E</given-names> </name><etal/></person-group><article-title>Association between masseter muscle sarcopenia and postoperative pneumonia in patients with esophageal cancer</article-title><source>Sci Rep</source><year>2022</year><month>09</month><day>30</day><volume>12</volume><issue>1</issue><fpage>16374</fpage><pub-id pub-id-type="doi">10.1038/s41598-022-20967-1</pub-id><pub-id 
pub-id-type="medline">36180776</pub-id></nlm-citation></ref><ref id="ref37"><label>37</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hwang</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>YH</given-names> </name><name name-style="western"><surname>Cho</surname><given-names>DH</given-names> </name><name name-style="western"><surname>Kim</surname><given-names>M</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>DS</given-names> </name><name name-style="western"><surname>Cho</surname><given-names>HJ</given-names> </name></person-group><article-title>Applicability of the masseter muscle as a nutritional biomarker</article-title><source>Medicine (Baltimore)</source><year>2020</year><month>02</month><volume>99</volume><issue>6</issue><fpage>e19069</fpage><pub-id pub-id-type="doi">10.1097/MD.0000000000019069</pub-id><pub-id pub-id-type="medline">32028430</pub-id></nlm-citation></ref><ref id="ref38"><label>38</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chen</surname><given-names>KC</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>TM</given-names> </name><name name-style="western"><surname>Wu</surname><given-names>WT</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>TG</given-names> </name><name name-style="western"><surname>Han</surname><given-names>DS</given-names> </name><name name-style="western"><surname>Chang</surname><given-names>KV</given-names> </name></person-group><article-title>Assessment of tongue strength in sarcopenia and sarcopenic dysphagia: a systematic review and meta-analysis</article-title><source>Front Nutr</source><year>2021</year><volume>8</volume><fpage>684840</fpage><pub-id pub-id-type="doi">10.3389/fnut.2021.684840</pub-id><pub-id 
pub-id-type="medline">34249993</pub-id></nlm-citation></ref><ref id="ref39"><label>39</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Volk</surname><given-names>GF</given-names> </name><name name-style="western"><surname>Karamyan</surname><given-names>I</given-names> </name><name name-style="western"><surname>Klingner</surname><given-names>CM</given-names> </name><name name-style="western"><surname>Reichenbach</surname><given-names>JR</given-names> </name><name name-style="western"><surname>Guntinas-Lichius</surname><given-names>O</given-names> </name></person-group><article-title>Quantitative magnetic resonance imaging volumetry of facial muscles in healthy patients with facial palsy</article-title><source>Plast Reconstr Surg Glob Open</source><year>2014</year><month>06</month><volume>2</volume><issue>6</issue><fpage>e173</fpage><pub-id pub-id-type="doi">10.1097/GOX.0000000000000128</pub-id><pub-id pub-id-type="medline">25289366</pub-id></nlm-citation></ref><ref id="ref40"><label>40</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ng</surname><given-names>HP</given-names> </name><name name-style="western"><surname>Foong</surname><given-names>KWC</given-names> </name><name name-style="western"><surname>Ong</surname><given-names>SH</given-names> </name><etal/></person-group><article-title>Quantitative analysis of human masticatory muscles using magnetic resonance imaging</article-title><source>Dentomaxillofac Radiol</source><year>2009</year><month>05</month><volume>38</volume><issue>4</issue><fpage>224</fpage><lpage>231</lpage><pub-id pub-id-type="doi">10.1259/dmfr/75198413</pub-id><pub-id pub-id-type="medline">19372110</pub-id></nlm-citation></ref><ref id="ref41"><label>41</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Ng</surname><given-names>HP</given-names> </name><name name-style="western"><surname>Ong</surname><given-names>SH</given-names> </name><name name-style="western"><surname>Hu</surname><given-names>Q</given-names> </name><name name-style="western"><surname>Foong</surname><given-names>KWC</given-names> </name><name name-style="western"><surname>Goh</surname><given-names>PS</given-names> </name><name name-style="western"><surname>Nowinski</surname><given-names>WL</given-names> </name></person-group><article-title>Muscles of mastication model-based MR image segmentation</article-title><source>Int J CARS</source><year>2006</year><month>11</month><day>23</day><volume>1</volume><issue>3</issue><fpage>137</fpage><lpage>148</lpage><pub-id pub-id-type="doi">10.1007/s11548-006-0046-4</pub-id></nlm-citation></ref><ref id="ref42"><label>42</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ng</surname><given-names>HP</given-names> </name><name name-style="western"><surname>Ong</surname><given-names>SH</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>J</given-names> </name><etal/></person-group><article-title>3D segmentation and quantification of a masticatory muscle from MR data using patient-specific models and matching distributions</article-title><source>J Digit Imaging</source><year>2009</year><month>10</month><volume>22</volume><issue>5</issue><fpage>449</fpage><lpage>462</lpage><pub-id pub-id-type="doi">10.1007/s10278-008-9132-1</pub-id><pub-id pub-id-type="medline">18516642</pub-id></nlm-citation></ref><ref id="ref43"><label>43</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Jiang</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Shang</surname><given-names>F</given-names> </name><name 
name-style="western"><surname>Peng</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Automatic masseter muscle accurate segmentation from CBCT using deep learning-based model</article-title><source>J Clin Med</source><year>2022</year><month>12</month><day>21</day><volume>12</volume><issue>1</issue><fpage>55</fpage><pub-id pub-id-type="doi">10.3390/jcm12010055</pub-id><pub-id pub-id-type="medline">36614860</pub-id></nlm-citation></ref><ref id="ref44"><label>44</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name name-style="western"><surname>Zhang</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Pei</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Qin</surname><given-names>H</given-names> </name><etal/></person-group><person-group person-group-type="editor"><name name-style="western"><surname>Zha</surname><given-names>H</given-names> </name></person-group><article-title>Masseter muscle segmentation from cone-beam ct images using generative adversarial network</article-title><conf-name>2019 IEEE 16th International Symposium on Biomedical Imaging (ISBI)</conf-name><conf-date>Apr 8-11, 2019</conf-date><conf-loc>Venice, Italy</conf-loc><pub-id pub-id-type="doi">10.1109/ISBI.2019.8759426</pub-id></nlm-citation></ref><ref id="ref45"><label>45</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zhu</surname><given-names>W</given-names> </name><name name-style="western"><surname>Huang</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Zeng</surname><given-names>L</given-names> </name><etal/></person-group><article-title>AnatomyNet: deep learning for fast and fully automated whole-volume segmentation of head and neck anatomy</article-title><source>Med 
Phys</source><year>2019</year><month>02</month><volume>46</volume><issue>2</issue><fpage>576</fpage><lpage>589</lpage><pub-id pub-id-type="doi">10.1002/mp.13300</pub-id><pub-id pub-id-type="medline">30480818</pub-id></nlm-citation></ref><ref id="ref46"><label>46</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Han</surname><given-names>W</given-names> </name><name name-style="western"><surname>Xia</surname><given-names>W</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>Z</given-names> </name><etal/></person-group><article-title>Radiomics and artificial intelligence study of masseter muscle segmentation in patients with hemifacial microsomia</article-title><source>J Craniofac Surg</source><year>2023</year><volume>34</volume><issue>2</issue><fpage>809</fpage><lpage>812</lpage><pub-id pub-id-type="doi">10.1097/SCS.0000000000009105</pub-id><pub-id pub-id-type="medline">36728424</pub-id></nlm-citation></ref><ref id="ref47"><label>47</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chen</surname><given-names>W</given-names> </name><name name-style="western"><surname>Li</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Dyer</surname><given-names>BA</given-names> </name><etal/></person-group><article-title>Deep learning vs. 
atlas-based models for fast auto-segmentation of the masticatory muscles on head and neck CT images</article-title><source>Radiat Oncol</source><year>2020</year><month>07</month><day>20</day><volume>15</volume><issue>1</issue><fpage>176</fpage><pub-id pub-id-type="doi">10.1186/s13014-020-01617-0</pub-id><pub-id pub-id-type="medline">32690103</pub-id></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Multimedia Appendix 1</label><p>Table presenting extended accuracy evaluation metrics, including Dice Similarity Coefficient (DSC), Jaccard Coefficient (JC), Average Symmetric Surface Distance (ASSD), Precision, Recall, Sensitivity, Specificity, and F1-Score. Results are presented as mean (standard deviation) and median (interquartile range) across 10-fold cross-validation.</p><media xlink:href="aging_v8i1e63686_app1.xlsx" xlink:title="XLSX File, 20 KB"/></supplementary-material><supplementary-material id="app2"><label>Multimedia Appendix 2</label><p>Table presenting association between area and volumes from MRI scans and BMI in each region of study.</p><media xlink:href="aging_v8i1e63686_app2.docx" xlink:title="DOCX File, 16 KB"/></supplementary-material></app-group></back></article>