<?xml version="1.0" encoding="UTF-8"?><!DOCTYPE article PUBLIC "-//NLM//DTD Journal Publishing DTD v2.0 20040830//EN" "journalpublishing.dtd"><article xmlns:mml="http://www.w3.org/1998/Math/MathML" xmlns:xlink="http://www.w3.org/1999/xlink" dtd-version="2.0" xml:lang="en" article-type="review-article"><front><journal-meta><journal-id journal-id-type="nlm-ta">JMIR Aging</journal-id><journal-id journal-id-type="publisher-id">aging</journal-id><journal-title>JMIR Aging</journal-title><abbrev-journal-title>JMIR Aging</abbrev-journal-title><issn pub-type="epub">2561-7605</issn><publisher><publisher-name>JMIR Publications</publisher-name><publisher-loc>Toronto, Canada</publisher-loc></publisher></journal-meta><article-meta><article-id pub-id-type="publisher-id">v8i1e76981</article-id><article-id pub-id-type="doi">10.2196/76981</article-id><article-categories><subj-group subj-group-type="heading"><subject>Review</subject></subj-group></article-categories><title-group><article-title>Comparative Diagnostic Accuracy of AI-Assisted Fluorine-18 Fluorodeoxyglucose Positron Emission Tomography Versus Structural Magnetic Resonance Imaging in Alzheimer Disease: Systematic Review and Meta-Analysis</article-title></title-group><contrib-group><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Wang</surname><given-names>Bingbing</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" corresp="yes" equal-contrib="yes"><name name-style="western"><surname>Zhao</surname><given-names>Tailiang</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Ma</surname><given-names>Rongrong</given-names></name><degrees>MSc</degrees><xref ref-type="aff" rid="aff2">2</xref><xref 
ref-type="fn" rid="equal-contrib1">*</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Huo</surname><given-names>Xiaochuan</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Xiong</surname><given-names>Xiaoxiao</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff3">3</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Wu</surname><given-names>Minjie</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Wang</surname><given-names>Yuran</given-names></name><degrees>MSc</degrees><xref ref-type="aff" rid="aff4">4</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Liu</surname><given-names>Liu</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff3">3</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Zhuang</surname><given-names>Zhijiang</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff3">3</xref></contrib><contrib contrib-type="author"><name name-style="western"><surname>Wang</surname><given-names>Bin</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff4">4</xref><xref ref-type="aff" rid="aff5">5</xref></contrib><contrib contrib-type="author" equal-contrib="yes"><name name-style="western"><surname>Shou</surname><given-names>Jixin</given-names></name><degrees>PhD</degrees><xref ref-type="aff" rid="aff1">1</xref><xref ref-type="fn" rid="equal-contrib1">*</xref></contrib></contrib-group><aff id="aff1"><institution>Department of Neurosurgery, Fifth Affiliated Hospital of Zhengzhou University</institution><addr-line>No. 
3 Kangfu-qian Street</addr-line><addr-line>Zhengzhou</addr-line><country>China</country></aff><aff id="aff2"><institution>Department of Anesthesiology and Perioperative Medicine, Xuchang Central Hospital, Henan University of Science and Technology</institution><addr-line>Xuchang</addr-line><country>China</country></aff><aff id="aff3"><institution>Department of Neurology, Fifth Affiliated Hospital of Zhengzhou University</institution><addr-line>Zhengzhou</addr-line><country>China</country></aff><aff id="aff4"><institution>The Fifth Clinical Medical College, Henan Medical College, Zhengzhou University</institution><addr-line>Zhengzhou</addr-line><country>China</country></aff><aff id="aff5"><institution>Department of Cardiology, Beijing Tsinghua Changgung Hospital, School of Clinical Medicine, Tsinghua University</institution><addr-line>Beijing</addr-line><country>China</country></aff><contrib-group><contrib contrib-type="editor"><name name-style="western"><surname>O'Connell</surname><given-names>Megan</given-names></name></contrib></contrib-group><contrib-group><contrib contrib-type="reviewer"><name name-style="western"><surname>Potla</surname><given-names>Ravi Teja</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Mohanadas</surname><given-names>Sadhasivam</given-names></name></contrib><contrib contrib-type="reviewer"><name name-style="western"><surname>Mirshahvalad</surname><given-names>Seyed Ali</given-names></name></contrib></contrib-group><author-notes><corresp>Correspondence to Tailiang Zhao, PhD, Department of Neurosurgery, Fifth Affiliated Hospital of Zhengzhou University, No. 
3 Kangfu-qian Street, Zhengzhou, 450052, China, 86 17630927442; <email>tailiang1996@163.com</email></corresp><fn fn-type="equal" id="equal-contrib1"><label>*</label><p>these authors contributed equally</p></fn></author-notes><pub-date pub-type="collection"><year>2025</year></pub-date><pub-date pub-type="epub"><day>8</day><month>10</month><year>2025</year></pub-date><volume>8</volume><elocation-id>e76981</elocation-id><history><date date-type="received"><day>05</day><month>05</month><year>2025</year></date><date date-type="rev-recd"><day>03</day><month>07</month><year>2025</year></date><date date-type="accepted"><day>20</day><month>07</month><year>2025</year></date></history><copyright-statement>&#x00A9; Bingbing Wang, Tailiang Zhao, Rongrong Ma, Xiaochuan Huo, Xiaoxiao Xiong, Minjie Wu, Yuran Wang, Liu Liu, Zhijiang Zhuang, Bin Wang, Jixin Shou. Originally published in JMIR Aging (<ext-link ext-link-type="uri" xlink:href="https://aging.jmir.org">https://aging.jmir.org</ext-link>), 8.10.2025. </copyright-statement><copyright-year>2025</copyright-year><license license-type="open-access" xlink:href="https://creativecommons.org/licenses/by/4.0/"><p>This is an open-access article distributed under the terms of the Creative Commons Attribution License (<ext-link ext-link-type="uri" xlink:href="https://creativecommons.org/licenses/by/4.0/">https://creativecommons.org/licenses/by/4.0/</ext-link>), which permits unrestricted use, distribution, and reproduction in any medium, provided the original work, first published in JMIR Aging, is properly cited. 
The complete bibliographic information, a link to the original publication on <ext-link ext-link-type="uri" xlink:href="https://aging.jmir.org">https://aging.jmir.org</ext-link>, as well as this copyright and license information must be included.</p></license><self-uri xlink:type="simple" xlink:href="https://aging.jmir.org/2025/1/e76981"/><abstract><sec><title>Background</title><p>Neuroimaging is crucial in the diagnosis of Alzheimer disease (AD). In recent years, artificial intelligence (AI)&#x2013;based neuroimaging technology has rapidly developed, providing new methods for accurate diagnosis of AD, but its performance differences still need to be systematically evaluated.</p></sec><sec><title>Objective</title><p>This study aims to conduct a systematic review and meta-analysis comparing the diagnostic performance of AI-assisted fluorine-18 fluorodeoxyglucose positron emission tomography (<sup>18</sup>F-FDG PET) and structural magnetic resonance imaging (sMRI) for AD.</p></sec><sec sec-type="methods"><title>Methods</title><p>Databases including Web of Science, PubMed, and Embase were searched from inception to January 2025 to identify original studies that developed or validated AI models for AD diagnosis using <sup>18</sup>F-FDG PET or sMRI. Methodological quality was assessed using the TRIPOD-AI (Transparent Reporting of a Multivariable Prediction Model for Individual Prognosis or Diagnosis&#x2013;Artificial Intelligence) checklist. A bivariate mixed-effects model was employed to calculate pooled sensitivity, specificity, and summary receiver operating characteristic curve area (SROC-AUC).</p></sec><sec sec-type="results"><title>Results</title><p>A total of 38 studies were included, with 28 moderate-to-high-quality studies analyzed. Pooled SROC-AUC values were 0.94 (95% CI 0.92&#x2010;0.96) for sMRI and 0.96 (95% CI 0.94&#x2010;0.98) for <sup>18</sup>F-FDG PET, demonstrating statistically significant intermodal differences (<italic>P</italic>=.02). 
Subgroup analyses revealed that for machine learning, pooled SROC-AUCs were 0.89 (95% CI 0.86&#x2010;0.92) for sMRI and 0.95 (95% CI 0.92&#x2010;0.96) for <sup>18</sup>F-FDG PET, while for deep learning, these values were 0.96 (95% CI 0.94&#x2010;0.97) and 0.97 (95% CI 0.96&#x2010;0.99), respectively. Meta-regression identified heterogeneity arising from study quality stratification, algorithm types, and validation strategies.</p></sec><sec sec-type="conclusions"><title>Conclusions</title><p>Both AI-assisted <sup>18</sup>F-FDG PET and sMRI exhibit high diagnostic accuracy in AD, with <sup>18</sup>F-FDG PET demonstrating superior overall diagnostic performance compared to sMRI.</p></sec></abstract><kwd-group><kwd>18F-FDG PET</kwd><kwd>Alzheimer disease</kwd><kwd>artificial intelligence</kwd><kwd>deep learning</kwd><kwd>machine learning</kwd><kwd>Preferred Reporting Items for Systematic Reviews and Meta-Analyses</kwd><kwd>PRISMA</kwd><kwd>sMRI</kwd><kwd>structural magnetic resonance imaging</kwd><kwd>systematic review</kwd></kwd-group></article-meta></front><body><sec id="s1" sec-type="intro"><title>Introduction</title><p>Alzheimer disease (AD) is a progressive neurodegenerative disorder characterized by insidious onset, cognitive decline, and memory impairment. In the United States, approximately 6.7 million adults aged &#x2265;65 years are affected by AD, while China faces an even greater burden, with over 13 million cases [<xref ref-type="bibr" rid="ref1">1</xref>,<xref ref-type="bibr" rid="ref2">2</xref>]. The prolonged disease course and high comorbidity rates have established AD as one of the most fatal and economically burdensome conditions of the 21st century [<xref ref-type="bibr" rid="ref3">3</xref>-<xref ref-type="bibr" rid="ref5">5</xref>]. 
Accurate diagnosis of AD is critical for therapeutic decision-making and prognostic evaluation, particularly in the context of global population aging and increasing demands for precise patient stratification in clinical trials [<xref ref-type="bibr" rid="ref6">6</xref>,<xref ref-type="bibr" rid="ref7">7</xref>].</p><p>Neuroimaging modalities, including fluorine-18 fluorodeoxyglucose positron emission tomography (<sup>18</sup>F-FDG PET) and structural magnetic resonance imaging (sMRI), have become cornerstone technologies in AD diagnostic frameworks (eg, National Institute on Aging-Alzheimer's Association criteria) because of their noninvasive nature and quantitative capabilities [<xref ref-type="bibr" rid="ref8">8</xref>-<xref ref-type="bibr" rid="ref10">10</xref>]. Recent advancements in artificial intelligence (AI) have revolutionized medical image analysis: machine learning (ML) enables data-driven predictive modeling beyond traditional rule-based programming, while deep learning (DL), an advanced ML paradigm, employs multilayer neural networks to extract high-level features from complex datasets, demonstrating transformative potential in neuroimaging [<xref ref-type="bibr" rid="ref11">11</xref>-<xref ref-type="bibr" rid="ref14">14</xref>]. Despite extensive research on AI-assisted sMRI or <sup>18</sup>F-FDG PET for AD diagnosis, two critical challenges hinder evaluation of model generalizability: (1) substantial heterogeneity in algorithm designs and validation frameworks, and (2) significant variability in the quality of individual studies [<xref ref-type="bibr" rid="ref15">15</xref>]. 
Furthermore, no high-quality meta-analysis has comprehensively compared the diagnostic performance of these two imaging modalities.</p><p>This systematic review and meta-analysis addresses three critical objectives: (1) quantitative evaluation of diagnostic accuracy metrics for AI-assisted <sup>18</sup>F-FDG PET and sMRI based on moderate-to-high-quality evidence; (2) direct comparison of diagnostic performance between modalities; and (3) investigation of confounding factors, including study quality (moderate-to-high vs low), algorithm types (ML vs DL), and validation strategies (internal vs external), through meta-regression. The findings are anticipated to inform evidence-based optimization of AD diagnostic pathways.</p></sec><sec id="s2" sec-type="methods"><title>Methods</title><sec id="s2-1"><title>Overview</title><p>This study adhered to the PRISMA-DTA (Preferred Reporting Items for Systematic Reviews and Meta-Analyses of Diagnostic Test Accuracy) guidelines and was prospectively registered on PROSPERO (ID: CRD42023449927) to ensure transparency and minimize reporting bias [<xref ref-type="bibr" rid="ref16">16</xref>,<xref ref-type="bibr" rid="ref17">17</xref>]. Two reviewers independently conducted all stages of the review process, including title and abstract screening, full-text evaluation, data extraction, adherence assessment to reporting guidelines, and risk-of-bias evaluation. Discrepancies were resolved through group consensus.</p></sec><sec id="s2-2"><title>Literature Search Strategy</title><p>Two investigators (TLZ and BW) systematically searched PubMed, Web of Science, and Embase from inception to January 2025 using a combination of Medical Subject Headings (MeSH) terms and free-text keywords. Additional searches were performed on clinical trial registries and OpenGrey to identify unpublished clinical trials and gray literature. 
Search terms encompassed four domains: (1) disease terminology (Alzheimer disease, AD, Alzheimer Syndrome); (2) imaging modalities (<sup>18</sup>F-FDG PET, sMRI); (3) AI methodologies (ML, DL); and (4) diagnostic metrics (sensitivity, specificity, summary receiver operating characteristic curve area [SROC-AUC]). Reference lists of included studies were manually screened to identify additional relevant publications. The specific search strategies are provided in Table S1 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>.</p></sec><sec id="s2-3"><title>Literature Inclusion and Exclusion Criteria</title><p>The inclusion criteria were as follows: (1) human studies developing or validating AI models using <sup>18</sup>F-FDG PET or sMRI to differentiate AD from normal controls; (2) AD diagnosis based on National Institute on Aging-Alzheimer's Association or International Working Group for New Research Criteria for Alzheimer&#x2019;s Disease criteria; (3) availability of diagnostic performance metrics (eg, true positives, false positives, true negatives, false negatives) or explicit reporting of sensitivity and specificity; and (4) full-text availability in English. The exclusion criteria were as follows: (1) case reports, reviews, letters, or conference abstracts; (2) studies lacking sufficient diagnostic performance data; and (3) duplicate publications reporting on the same cohort without novel analyses.</p></sec><sec id="s2-4"><title>Literature Screening and Data Extraction</title><p>Two reviewers (TLZ and BW) independently performed title and abstract screening and full-text screening, followed by cross-verification to ensure accuracy. Two additional investigators (RM and XH) extracted data using predefined forms, including study characteristics (first author, publication year), model specifications (algorithm type, validation strategies), and diagnostic performance metrics (2&#x00D7;2 contingency tables, sensitivity, specificity). 
Extracted data were cross-checked for completeness and precision.</p></sec><sec id="s2-5"><title>Quality Assessment of Included Studies</title><p>The methodological quality of included studies was evaluated using an adapted TRIPOD-AI (Transparent Reporting of a Multivariable Prediction Model for Individual Prognosis or Diagnosis&#x2013;Artificial Intelligence) checklist (Table S2 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>) by two independent reviewers (RM and XH) [<xref ref-type="bibr" rid="ref18">18</xref>]. The instrument assessed four domains: (1) data quality (sample diversity, adequacy, preprocessing standardization), (2) model development (feature extraction rationale, algorithm selection, hyperparameter optimization), (3) validation methods (cross-validation rigor, external validation), and (4) clinical applicability (interpretability, net clinical benefit). Each of the nine items was scored 0&#x2010;2 (0=unsatisfied; 1=partially satisfied; 2=fully satisfied), with total scores categorized as high quality (&#x2265;16), moderate quality (10-15), or low quality (&#x2264;9). Disagreements were resolved through discussion.</p></sec><sec id="s2-6"><title>Data Analysis</title><p>Diagnostic performance metrics were calculated using a bivariate mixed-effects model. SROC-AUC served as the primary outcome due to its threshold independence. Diagnostic accuracy was classified per National Institutes of Health criteria as high (AUC&#x2265;0.90), moderate (0.70&#x2010;0.89), or low (AUC&#x003C;0.70) [<xref ref-type="bibr" rid="ref19">19</xref>].</p></sec><sec id="s2-7"><title>Heterogeneity Assessment and Publication Bias Evaluation</title><p>Heterogeneity was quantified using Cochran <italic>Q</italic> test (significance defined at the .05 level) and <italic>I</italic><sup>2</sup> statistics (25%: low; 50%: moderate; 75%: high heterogeneity) [<xref ref-type="bibr" rid="ref20">20</xref>]. 
Threshold effects were assessed using Spearman correlation between logit-transformed sensitivity and 1&#x2013;specificity. Sensitivity analyses evaluated outlier influence by iteratively excluding individual studies. Univariable meta-regression analyses were conducted to assess the influence of potential confounding factors, including study quality (moderate to high vs low quality), algorithm type (ML vs DL), and validation strategy (internal vs external validation). Statistical significance of modifiers of the pooled effect size was defined at the .05 level after adjustment via the Holm-Bonferroni method. Publication bias was assessed via funnel plot asymmetry and Egger test, with significance defined at the .05 level. All analyses were conducted in Stata 16.0 using the MIDAS commands.</p><p>As this study focused on diagnostic accuracy, we employed the bivariate random-effects model via the MIDAS command in Stata to jointly synthesize sensitivity and specificity. This approach accounts for both within- and between-study variability, as well as the inherent correlation between sensitivity and specificity. In such models, conventional heterogeneity measures like <italic>I</italic>&#x00B2;, although still reported for completeness, may not fully capture the joint variability of test performance measures. Therefore, to better understand heterogeneity, we additionally performed meta-regression and calculated joint likelihood ratio tests to explore potential effect modifiers. 
These methods align with recommended practices in diagnostic test accuracy meta-analyses [<xref ref-type="bibr" rid="ref21">21</xref>,<xref ref-type="bibr" rid="ref22">22</xref>].</p></sec></sec><sec id="s3" sec-type="results"><title>Results</title><sec id="s3-1"><title>Literature Screening Process and Results</title><p>Initial searches identified 876 PubMed records (n=89 involving <sup>18</sup>F-FDG PET; n=787 sMRI), 932 Embase records (n=111 <sup>18</sup>F-FDG PET; n=821 sMRI), and 2610 Web of Science records (n=230 <sup>18</sup>F-FDG PET; n=2380 sMRI), supplemented by 3 additional studies. After deduplication in EndNote 20, 2 reviewers independently screened titles and abstracts using the Population, Intervention, Comparison, Outcome, Study Design (PICOS) criteria, followed by full-text evaluation against predefined inclusion/exclusion criteria. This rigorous process yielded 38 studies for systematic review and meta-analysis [<xref ref-type="bibr" rid="ref23">23</xref>-<xref ref-type="bibr" rid="ref60">60</xref>]. The literature screening process and results are shown in <xref ref-type="fig" rid="figure1">Figure 1</xref>.</p><fig position="float" id="figure1"><label>Figure 1.</label><caption><p>PRISMA (Preferred Reporting Items for Systematic Reviews and Meta-Analysis) flow diagram.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="aging_v8i1e76981_fig01.png"/></fig></sec><sec id="s3-2"><title>Characteristics of Included Studies</title><p><xref ref-type="table" rid="table1">Table 1</xref> and Table S3 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref> detail the characteristics of the included studies. Imaging modalities comprised sMRI (n=15, 39%), <sup>18</sup>F-FDG PET (n=15, 39%), and combined sMRI/<sup>18</sup>F-FDG PET (n=8, 22%). 
Data sources predominantly relied on open-access databases (n=29, 76%), with 28 (97%) from the Alzheimer&#x2019;s Disease Neuroimaging Initiative database and 1 (3%) from the Open Access Series of Imaging Studies database. Four studies (11%) used institutional data, while 5 (13%) combined Alzheimer&#x2019;s Disease Neuroimaging Initiative database with local datasets. Algorithmically, 26 studies (68%) employed ML, predominantly support vector machines (17/26, 65%), whereas 12 (32%) utilized DL, primarily convolutional neural networks (9/12, 75%). Internal validation was implemented in 33 studies (87%), with 10-fold cross-validation in 16 (48%); only 5 studies (13%) incorporated external validation.</p><table-wrap id="t1" position="float"><label>Table 1.</label><caption><p>Characteristics of all included studies (n=38).</p></caption><table id="table1" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom">Studies</td><td align="left" valign="bottom" colspan="7">Model development and internal validation</td><td align="left" valign="bottom" colspan="2">Model external validation</td></tr><tr><td align="left" valign="bottom"/><td align="left" valign="bottom">Definition</td><td align="left" valign="bottom">Modality</td><td align="left" valign="bottom">Data source</td><td align="left" valign="bottom">Algorithm used</td><td align="left" valign="bottom">Training/validation (ratio)</td><td align="left" valign="bottom">Testing</td><td align="left" valign="bottom">Type of internal validation</td><td align="left" valign="bottom">Data source</td><td align="left" valign="bottom">Definition</td></tr></thead><tbody><tr><td align="left" valign="top">Zhang et al [<xref ref-type="bibr" rid="ref23">23</xref>], 2011</td><td align="left" valign="top">AD<sup><xref ref-type="table-fn" rid="table1fn1">a</xref></sup>: 51; NC<sup><xref ref-type="table-fn" rid="table1fn2">b</xref></sup>: 52</td><td align="left" valign="top">sMRI<sup><xref ref-type="table-fn" 
rid="table1fn3">c</xref></sup>; PET<sup><xref ref-type="table-fn" rid="table1fn4">d</xref></sup></td><td align="left" valign="top">ADNI<sup><xref ref-type="table-fn" rid="table1fn5">e</xref></sup> database</td><td align="left" valign="top">SVM<sup><xref ref-type="table-fn" rid="table1fn6">f</xref></sup></td><td align="left" valign="top">NR<sup><xref ref-type="table-fn" rid="table1fn7">g</xref></sup></td><td align="left" valign="top">NR</td><td align="left" valign="top">10-fold CV<sup><xref ref-type="table-fn" rid="table1fn8">h</xref></sup></td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Yun et al [<xref ref-type="bibr" rid="ref24">24</xref>], 2015</td><td align="left" valign="top">AD: 71; NC: 85</td><td align="left" valign="top">sMRI; PET</td><td align="left" valign="top">ADNI database</td><td align="left" valign="top">LDA<sup><xref ref-type="table-fn" rid="table1fn9">i</xref></sup></td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">LOOCV<sup><xref ref-type="table-fn" rid="table1fn10">j</xref></sup></td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Westman et al [<xref ref-type="bibr" rid="ref25">25</xref>], 2012</td><td align="left" valign="top">AD: 96; NC: 111</td><td align="left" valign="top">sMRI</td><td align="left" valign="top">ADNI database</td><td align="left" valign="top">OPLS<sup><xref ref-type="table-fn" rid="table1fn11">k</xref></sup></td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">7-fold CV</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Vemuri et al [<xref ref-type="bibr" rid="ref26">26</xref>], 2008</td><td align="left" valign="top">AD: 190; NC: 190</td><td align="left" valign="top">sMRI</td><td align="left" valign="top">Mayo 
Clinic</td><td align="left" valign="top">SVM</td><td align="left" valign="top">280</td><td align="left" valign="top">100</td><td align="left" valign="top">4-fold CV; hold-out</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Suk et al [<xref ref-type="bibr" rid="ref27">27</xref>], 2014</td><td align="left" valign="top">AD: 93; NC: 101</td><td align="left" valign="top">sMRI; PET</td><td align="left" valign="top">ADNI database</td><td align="left" valign="top">DBM<sup><xref ref-type="table-fn" rid="table1fn12">l</xref></sup>; SVM</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">10-fold CV</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Sayeed et al [<xref ref-type="bibr" rid="ref28">28</xref>], 2002</td><td align="left" valign="top">AD: 18; NC: 10</td><td align="left" valign="top">PET</td><td align="left" valign="top">Hammersmith Hospital</td><td align="left" valign="top">DFA<sup><xref ref-type="table-fn" rid="table1fn13">m</xref></sup></td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">LOOCV</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Pan et al [<xref ref-type="bibr" rid="ref29">29</xref>], 2019</td><td align="left" valign="top">AD: 247; NC: 246</td><td align="left" valign="top">PET</td><td align="left" valign="top">ADNI database</td><td align="left" valign="top">SVM</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">10-fold CV</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Padilla et al [<xref ref-type="bibr" rid="ref30">30</xref>], 2012</td><td align="left" valign="top">AD: 53; NC: 52</td><td align="left" 
valign="top">PET</td><td align="left" valign="top">ADNI database</td><td align="left" valign="top">SVM</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">LOOCV</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Ni et al [<xref ref-type="bibr" rid="ref31">31</xref>], 2021</td><td align="left" valign="top">AD: 638; NC: 629</td><td align="left" valign="top">PET</td><td align="left" valign="top">ADNI database</td><td align="left" valign="top">CNN<sup><xref ref-type="table-fn" rid="table1fn14">n</xref></sup></td><td align="left" valign="top">1000</td><td align="left" valign="top">267</td><td align="left" valign="top">Hold-out</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Magnin et al [<xref ref-type="bibr" rid="ref32">32</xref>], 2009</td><td align="left" valign="top">AD: 16; NC: 22</td><td align="left" valign="top">sMRI</td><td align="left" valign="top">Piti&#x00E9;-Salp&#x00E9;triere Hospital</td><td align="left" valign="top">SVM</td><td align="left" valign="top">75%</td><td align="left" valign="top">25%</td><td align="left" valign="top">Bootstrap resampling</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Lu et al [<xref ref-type="bibr" rid="ref33">33</xref>], 2018</td><td align="left" valign="top">AD: 226; NC: 304</td><td align="left" valign="top">PET</td><td align="left" valign="top">ADNI database</td><td align="left" valign="top">MDNN<sup><xref ref-type="table-fn" rid="table1fn15">o</xref></sup></td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">10-fold CV</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Liu et al [<xref ref-type="bibr" rid="ref34">34</xref>], 2012</td><td 
align="left" valign="top">AD: 198; NC: 229</td><td align="left" valign="top">sMRI</td><td align="left" valign="top">ADNI database</td><td align="left" valign="top">SRC<sup><xref ref-type="table-fn" rid="table1fn16">p</xref></sup></td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">10-fold CV</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Liu et al [<xref ref-type="bibr" rid="ref35">35</xref>], 2018</td><td align="left" valign="top">AD: 93; NC: 100</td><td align="left" valign="top">PET</td><td align="left" valign="top">ADNI database</td><td align="left" valign="top">2D-CNN</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">10-fold CV</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Li et al [<xref ref-type="bibr" rid="ref36">36</xref>], 2015</td><td align="left" valign="top">AD: 44; NC: 45</td><td align="left" valign="top">PET</td><td align="left" valign="top">ADNI database; TUM database</td><td align="left" valign="top">GMM<sup><xref ref-type="table-fn" rid="table1fn17">q</xref></sup>; SVM</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">10-fold CV</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Lerch et al [<xref ref-type="bibr" rid="ref37">37</xref>], 2008</td><td align="left" valign="top">AD: 19; NC: 17</td><td align="left" valign="top">sMRI</td><td align="left" valign="top">Ludwig Maximilian University of Munich</td><td align="left" valign="top">QDA<sup><xref ref-type="table-fn" rid="table1fn18">r</xref></sup></td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">LOOCV</td><td align="left" valign="top">NR</td><td align="left" 
valign="top">NR</td></tr><tr><td align="left" valign="top">Kim et al [<xref ref-type="bibr" rid="ref38">38</xref>], 2020</td><td align="left" valign="top">AD: 141; NC: 348</td><td align="left" valign="top">PET</td><td align="left" valign="top">ADNI database</td><td align="left" valign="top">CNN</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Severance dataset</td><td align="left" valign="top">AD: 80; NC: 72</td></tr><tr><td align="left" valign="top">Kim et al [<xref ref-type="bibr" rid="ref39">39</xref>], 2020</td><td align="left" valign="top">AD: 139; NC: 347</td><td align="left" valign="top">PET</td><td align="left" valign="top">ADNI database</td><td align="left" valign="top">BEGAN<sup><xref ref-type="table-fn" rid="table1fn19">s</xref></sup>; SVM</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Severance dataset</td><td align="left" valign="top">AD: 73; NC: 68</td></tr><tr><td align="left" valign="top">Katako et al [<xref ref-type="bibr" rid="ref40">40</xref>], 2018</td><td align="left" valign="top">AD: 94; NC: 111</td><td align="left" valign="top">PET</td><td align="left" valign="top">ADNI database</td><td align="left" valign="top">SVM</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">10-fold CV</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Ismail et al [<xref ref-type="bibr" rid="ref41">41</xref>], 2023</td><td align="left" valign="top">AD: 511; NC: 535</td><td align="left" valign="top">sMRI; PET</td><td align="left" valign="top">ADNI database</td><td align="left" valign="top">MultiAz-Net; SVM</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">10-fold CV</td><td align="left" 
valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Ill&#x00E1;n et al [<xref ref-type="bibr" rid="ref42">42</xref>], 2011</td><td align="left" valign="top">AD: 95; NC: 97</td><td align="left" valign="top">PET</td><td align="left" valign="top">ADNI database</td><td align="left" valign="top">SVM</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">LOOCV</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Hinrichs et al [<xref ref-type="bibr" rid="ref43">43</xref>], 2009</td><td align="left" valign="top">AD: 89; NC: 94</td><td align="left" valign="top">sMRI; PET</td><td align="left" valign="top">ADNI database</td><td align="left" valign="top">LPboosting<sup><xref ref-type="table-fn" rid="table1fn20">t</xref></sup></td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">Leave-many-out CV</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Gray et al [<xref ref-type="bibr" rid="ref44">44</xref>], 2012</td><td align="left" valign="top">AD: 50; NC: 54</td><td align="left" valign="top">PET</td><td align="left" valign="top">ADNI database</td><td align="left" valign="top">SVM</td><td align="left" valign="top">75%</td><td align="left" valign="top">25%</td><td align="left" valign="top">Monte Carlo CV</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Gray et al [<xref ref-type="bibr" rid="ref45">45</xref>], 2013</td><td align="left" valign="top">AD: 37; NC: 35</td><td align="left" valign="top">sMRI; PET</td><td align="left" valign="top">ADNI database</td><td align="left" valign="top">RF<sup><xref ref-type="table-fn" rid="table1fn21">u</xref></sup></td><td align="left" valign="top">75%</td><td align="left" valign="top">25%</td><td 
align="left" valign="top">Monte Carlo CV</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Feng et al [<xref ref-type="bibr" rid="ref46">46</xref>], 2019</td><td align="left" valign="top">AD: 93; NC: 100</td><td align="left" valign="top">sMRI; PET</td><td align="left" valign="top">ADNI database</td><td align="left" valign="top">FSBi-LSTM<sup><xref ref-type="table-fn" rid="table1fn22">v</xref></sup>; 3D-CNN</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">10-fold CV</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Cuingnet et al [<xref ref-type="bibr" rid="ref47">47</xref>], 2011</td><td align="left" valign="top">AD: 137; NC: 162</td><td align="left" valign="top">sMRI</td><td align="left" valign="top">ADNI database</td><td align="left" valign="top">SVM</td><td align="left" valign="top">50%</td><td align="left" valign="top">50%</td><td align="left" valign="top">Hold-out</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Chen et al [<xref ref-type="bibr" rid="ref48">48</xref>], 2022</td><td align="left" valign="top">AD: 326; NC: 413</td><td align="left" valign="top">sMRI</td><td align="left" valign="top">ADNI-1 database; ADNI-2 database</td><td align="left" valign="top">2D-CNN; 3D-CNN</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">ADNI-1 database; ADNI-2 database</td><td align="left" valign="top">AD: 326; NC: 413</td></tr><tr><td align="left" valign="top">Song et al [<xref ref-type="bibr" rid="ref49">49</xref>], 2021</td><td align="left" valign="top">AD: 95; NC: 126</td><td align="left" valign="top">sMRI; PET</td><td align="left" valign="top">ADNI database</td><td align="left" valign="top">3D-CNN</td><td align="left" 
valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">10-fold CV</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Li et al [<xref ref-type="bibr" rid="ref50">50</xref>], 2019</td><td align="left" valign="top">AD: 130; NC: 162</td><td align="left" valign="top">PET</td><td align="left" valign="top">ADNI database</td><td align="left" valign="top">SVM</td><td align="left" valign="top">70%</td><td align="left" valign="top">30%</td><td align="left" valign="top">Monte Carlo CV</td><td align="left" valign="top">Huashan database</td><td align="left" valign="top">AD: 22; NC: 22</td></tr><tr><td align="left" valign="top">Ahila et al [<xref ref-type="bibr" rid="ref51">51</xref>], 2022</td><td align="left" valign="top">AD: 220; NC: 635</td><td align="left" valign="top">PET</td><td align="left" valign="top">ADNI database</td><td align="left" valign="top">2D-CNN</td><td align="left" valign="top">90%</td><td align="left" valign="top">10%</td><td align="left" valign="top">Hold-out</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Toussaint et al [<xref ref-type="bibr" rid="ref52">52</xref>], 2012</td><td align="left" valign="top">AD: 80; NC: 80</td><td align="left" valign="top">PET</td><td align="left" valign="top">ADNI database</td><td align="left" valign="top">SVM</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">LOOCV</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Tong et al [<xref ref-type="bibr" rid="ref53">53</xref>], 2014</td><td align="left" valign="top">AD: 198; NC: 231</td><td align="left" valign="top">sMRI</td><td align="left" valign="top">ADNI database</td><td align="left" valign="top">SVM</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td 
align="left" valign="top">LOOCV</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Min et al [<xref ref-type="bibr" rid="ref54">54</xref>], 2014</td><td align="left" valign="top">AD: 97; NC: 128</td><td align="left" valign="top">sMRI</td><td align="left" valign="top">ADNI database</td><td align="left" valign="top">SVM</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">10-fold CV</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Jin et al [<xref ref-type="bibr" rid="ref55">55</xref>], 2020</td><td align="left" valign="top">AD: 488; NC: 536</td><td align="left" valign="top">sMRI</td><td align="left" valign="top">In-house database; ADNI database</td><td align="left" valign="top">3D attention network</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">10-fold CV; leave center out CV</td><td align="left" valign="top">ADNI database; in-house database</td><td align="left" valign="top">AD: 488; NC: 536</td></tr><tr><td align="left" valign="top">Cho et al [<xref ref-type="bibr" rid="ref56">56</xref>], 2012</td><td align="left" valign="top">AD: 128; NC: 160</td><td align="left" valign="top">sMRI</td><td align="left" valign="top">ADNI database</td><td align="left" valign="top">LDA</td><td align="left" valign="top">142</td><td align="left" valign="top">146</td><td align="left" valign="top">Hold-out</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Chincarini et al [<xref ref-type="bibr" rid="ref57">57</xref>], 2011</td><td align="left" valign="top">AD: 144; NC: 189</td><td align="left" valign="top">sMRI</td><td align="left" valign="top">ADNI database</td><td align="left" valign="top">RF; SVM</td><td align="left" valign="top">NR</td><td align="left" 
valign="top">NR</td><td align="left" valign="top">20-fold CV</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Beheshti et al [<xref ref-type="bibr" rid="ref58">58</xref>], 2017</td><td align="left" valign="top">AD: 92; NC: 94</td><td align="left" valign="top">sMRI</td><td align="left" valign="top">ADNI database</td><td align="left" valign="top">SVM</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">10-fold CV</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Anandh et al [<xref ref-type="bibr" rid="ref59">59</xref>], 2016</td><td align="left" valign="top">AD: 30; NC: 55</td><td align="left" valign="top">sMRI</td><td align="left" valign="top">OASIS<sup><xref ref-type="table-fn" rid="table1fn23">w</xref></sup> database</td><td align="left" valign="top">SVM</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td><td align="left" valign="top">10-fold CV</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr><tr><td align="left" valign="top">Amoroso et al [<xref ref-type="bibr" rid="ref60">60</xref>], 2018</td><td align="left" valign="top">AD: 86; NC: 81</td><td align="left" valign="top">sMRI</td><td align="left" valign="top">ADNI database</td><td align="left" valign="top">RF</td><td align="left" valign="top">67</td><td align="left" valign="top">100</td><td align="left" valign="top">10-fold CV; hold-out</td><td align="left" valign="top">NR</td><td align="left" valign="top">NR</td></tr></tbody></table><table-wrap-foot><fn id="table1fn1"><p><sup>a</sup>AD: Alzheimer disease.</p></fn><fn id="table1fn2"><p><sup>b</sup>NC: normal cognitive.</p></fn><fn id="table1fn3"><p><sup>c</sup>sMRI: structural magnetic resonance imaging.</p></fn><fn id="table1fn4"><p><sup>d</sup>PET: positron emission tomography. 
</p></fn><fn id="table1fn5"><p><sup>e</sup>ADNI: Alzheimer&#x2019;s Disease Neuroimaging Initiative.</p></fn><fn id="table1fn6"><p><sup>f</sup>SVM: support vector machine.</p></fn><fn id="table1fn7"><p><sup>g</sup>NR: no report.</p></fn><fn id="table1fn8"><p><sup>h</sup>CV: cross-validation.</p></fn><fn id="table1fn9"><p><sup>i</sup>LDA: linear discriminant analysis.</p></fn><fn id="table1fn10"><p><sup>j</sup>LOOCV: leave-one-out cross-validation.</p></fn><fn id="table1fn11"><p><sup>k</sup>OPLS: orthogonal partial least squares.</p></fn><fn id="table1fn12"><p><sup>l</sup>DBM: deep Boltzmann machine.</p></fn><fn id="table1fn13"><p><sup>m</sup>DFA: discriminant function analysis.</p></fn><fn id="table1fn14"><p><sup>n</sup>CNN: convolutional neural network.</p></fn><fn id="table1fn15"><p><sup>o</sup>MDNN: multiscale deep neural network.</p></fn><fn id="table1fn16"><p><sup>p</sup>SRC: sparse representation&#x2013;based classifier.</p></fn><fn id="table1fn17"><p><sup>q</sup>GMM: Gaussian mixture model.</p></fn><fn id="table1fn18"><p><sup>r</sup>QDA: quadratic discriminant analysis.</p></fn><fn id="table1fn19"><p><sup>s</sup>BEGAN: boundary equilibrium generative adversarial network.</p></fn><fn id="table1fn20"><p><sup>t</sup>LP: linear program.</p></fn><fn id="table1fn21"><p><sup>u</sup>RF: random forest.</p></fn><fn id="table1fn22"><p><sup>v</sup>FSBi-LSTM: fully stacked bidirectional long short-term memory.</p></fn><fn id="table1fn23"><p><sup>w</sup>OASIS: Open Access Series of Imaging Studies.</p></fn></table-wrap-foot></table-wrap></sec><sec id="s3-3"><title>Quality Assessment Results of Included Studies</title><p>Nine (24%) studies were high quality, characterized by adequate sample sizes, rigorous validation (eg, multicenter external validation), and standardized reporting. Moderate-quality studies (n=19, 50%) met basic methodological standards (eg, cross-validation) but had limitations such as single-center data or insufficient clinical correlation analyses. 
Ten (26%) studies were low quality due to small sample sizes (&#x003C;100 cases) and inadequate validation strategies, compromising external validity. The results of quality assessments for each study are detailed in Table S4 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>.</p></sec><sec id="s3-4"><title>Data Analysis Results</title><sec id="s3-4-1"><title>Overall Diagnostic Performance</title><p>Analysis of 29 sMRI-related contingency tables demonstrated a pooled sensitivity of 88% (95% CI 86%-89%), a specificity of 92% (95% CI 90%-93%), and an SROC-AUC of 0.94 (95% CI 0.92-0.96). Subgroup analyses stratified by algorithm type revealed that ML (19 tables) achieved a sensitivity of 86% (95% CI 85%-88%), a specificity of 91% (95% CI 88%-93%), and an SROC-AUC of 0.88 (95% CI 0.85-0.90), while DL (10 tables) showed improved performance with a sensitivity of 88% (95% CI 86%-91%), a specificity of 92% (95% CI 90%-94%), and an SROC-AUC of 0.96 (95% CI 0.94-0.97). Stratification by study quality indicated that moderate-to-high-quality studies (24 tables) yielded a sensitivity of 87% (95% CI 85%-89%), a specificity of 91% (95% CI 89%-93%), and an SROC-AUC of 0.94 (95% CI 0.92-0.96), whereas low-quality studies (5 tables) reported elevated metrics with a sensitivity of 91% (95% CI 87%-94%), a specificity of 95% (95% CI 92%-97%), and an SROC-AUC of 0.98 (95% CI 0.96-0.99). 
Validation strategy&#x2013;based analysis showed that internal validation (25 tables) achieved a sensitivity of 88% (95% CI 86%-90%), a specificity of 92% (95% CI 90%-94%), and an SROC-AUC of 0.95 (95% CI 0.92-0.96), while external validation (4 tables) demonstrated marginally lower performance with a sensitivity of 85% (95% CI 81%-89%), a specificity of 91% (95% CI 85%-94%), and an SROC-AUC of 0.93 (95% CI 0.90-0.95).</p><p>For <sup>18</sup>F-FDG PET (27 tables), pooled estimates demonstrated a sensitivity of 90% (95% CI 88%-92%), a specificity of 93% (95% CI 91%-94%), and an SROC-AUC of 0.96 (95% CI 0.94-0.98). Subgroup analyses stratified by algorithm type revealed that ML (15 tables) achieved a sensitivity of 89% (95% CI 86%-90%), a specificity of 91% (95% CI 88%-93%), and an SROC-AUC of 0.94 (95% CI 0.91-0.96), while DL (12 tables) exhibited superior performance with a sensitivity of 91% (95% CI 89%-93%), a specificity of 94% (95% CI 93%-96%), and an SROC-AUC of 0.98 (95% CI 0.96-0.99). Moderate-to-high-quality studies (21 tables) demonstrated a sensitivity of 90% (95% CI 88%-92%), a specificity of 93% (95% CI 91%-94%), and an SROC-AUC of 0.96 (95% CI 0.94-0.98), whereas low-quality studies (6 tables) showed comparable metrics with a sensitivity of 91% (95% CI 86%-94%), a specificity of 93% (95% CI 87%-96%), and an SROC-AUC of 0.96 (95% CI 0.94-0.98). Internal validation (24 tables) yielded a sensitivity of 90% (95% CI 89%-92%), a specificity of 93% (95% CI 91%-94%), and an SROC-AUC of 0.96 (95% CI 0.94%-0.97%), while external validation data (3 tables) were insufficient for SROC-AUC calculation but reported a sensitivity of 87% (95% CI 81%-93%) and a specificity of 95% (95% CI 91%-97%). 
The data analysis results are shown in <xref ref-type="table" rid="table2">Table 2</xref>.</p><table-wrap id="t2" position="float"><label>Table 2.</label><caption><p>Summary estimates and meta-regression of pooled performance of all studies.</p></caption><table id="table2" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom" colspan="2">Main and subordinate directory</td><td align="left" valign="bottom">Tables, n</td><td align="left" valign="bottom">Sensitivity (%; 95% CI)</td><td align="left" valign="bottom"><italic>I</italic><sup>2</sup></td><td align="left" valign="bottom">Specificity (%; 95% CI)</td><td align="left" valign="bottom"><italic>I</italic><sup>2</sup></td><td align="left" valign="bottom">Joint <italic>P</italic> value</td><td align="left" valign="bottom">AUC<sup><xref ref-type="table-fn" rid="table2fn1">a</xref></sup> (95% CI)</td></tr></thead><tbody><tr><td align="left" valign="top" colspan="9">sMRI<sup><xref ref-type="table-fn" rid="table2fn2">b</xref></sup></td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Overall</td><td align="left" valign="top">29</td><td align="left" valign="top">88 (86-89)</td><td align="left" valign="top">55.32</td><td align="left" valign="top">92 (90-93)</td><td align="left" valign="top">74.08</td><td align="left" valign="top">&#x2014;<sup><xref ref-type="table-fn" rid="table2fn3">j</xref></sup></td><td align="left" valign="top">0.94 (0.92-0.96)</td></tr><tr><td align="left" valign="top" colspan="7"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Study quality</td><td align="left" valign="top">.02</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Medium-to-high quality</td><td 
align="left" valign="top">24</td><td align="left" valign="top">87 (85-89)</td><td align="left" valign="top">57.99</td><td align="left" valign="top">91 (89-93)</td><td align="left" valign="top">75.94</td><td align="left" valign="top"/><td align="left" valign="top">0.94 (0.92-0.96)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Low quality</td><td align="left" valign="top">5</td><td align="left" valign="top">91 (87-94)</td><td align="left" valign="top">10.2</td><td align="left" valign="top">95 (92-97)</td><td align="left" valign="top">0</td><td align="left" valign="top"/><td align="left" valign="top">0.98 (0.96-0.99)</td></tr><tr><td align="left" valign="top" colspan="7"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Algorithm type</td><td align="left" valign="top">.40</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>ML<sup><xref ref-type="table-fn" rid="table2fn4">d</xref></sup></td><td align="left" valign="top">19</td><td align="left" valign="top">86 (85-88)</td><td align="left" valign="top">21.23</td><td align="left" valign="top">91 (88-93)</td><td align="left" valign="top">73.82</td><td align="left" valign="top"/><td align="left" valign="top">0.88 (0.85-0.90)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>DL<sup><xref ref-type="table-fn" rid="table2fn5">e</xref></sup></td><td align="left" valign="top">10</td><td align="left" valign="top">88 (86-91)</td><td align="left" 
valign="top">76.56</td><td align="left" valign="top">92 (90-94)</td><td align="left" valign="top">76.53</td><td align="left" valign="top"/><td align="left" valign="top">0.96 (0.94-0.97)</td></tr><tr><td align="left" valign="top" colspan="7"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Validation strategy</td><td align="left" valign="top">.33</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Internal validation</td><td align="left" valign="top">25</td><td align="left" valign="top">88 (86-90)</td><td align="left" valign="top">46.38</td><td align="left" valign="top">92 (90-94)</td><td align="left" valign="top">71.65</td><td align="left" valign="top"/><td align="left" valign="top">0.95 (0.92-0.96)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>External validation</td><td align="left" valign="top">4</td><td align="left" valign="top">85 (81-89)</td><td align="left" valign="top">72.26</td><td align="left" valign="top">91 (85-94)</td><td align="left" valign="top">83.65</td><td align="left" valign="top"/><td align="left" valign="top">0.93 (0.90-0.95)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>SVM<sup><xref ref-type="table-fn" rid="table2fn6">f</xref></sup></td><td align="left" valign="top">10</td><td align="left" valign="top">87 (85-90)</td><td align="left" valign="top">17.47</td><td align="left" valign="top">93 (90-95)</td><td align="left" valign="top">57.65</td><td align="left" 
valign="top"/><td align="left" valign="top">0.93 (0.90-0.95)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>CNN<sup><xref ref-type="table-fn" rid="table2fn7">g</xref></sup></td><td align="left" valign="top">9</td><td align="left" valign="top">88 (85-91)</td><td align="left" valign="top">78.19</td><td align="left" valign="top">92 (90-94)</td><td align="left" valign="top">77.55</td><td align="left" valign="top"/><td align="left" valign="top">0.96 (0.93-0.97)</td></tr><tr><td align="left" valign="top" colspan="9"><sup>18</sup>F-FDG PET<sup><xref ref-type="table-fn" rid="table2fn8">k</xref></sup></td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Overall</td><td align="left" valign="top">27</td><td align="left" valign="top">90 (88-92)</td><td align="left" valign="top">42.04</td><td align="left" valign="top">93 (91-94)</td><td align="left" valign="top">61.49</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">0.96 (0.94-0.98)</td></tr><tr><td align="left" valign="top" colspan="7"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Study quality</td><td align="left" valign="top">.96</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Medium-to-high quality</td><td align="left" valign="top">21</td><td align="left" valign="top">90 (88-92)</td><td align="left" valign="top">44.58</td><td align="left" valign="top">93 (91-94)</td><td align="left" valign="top">58.52</td><td align="left" valign="top"/><td align="left" valign="top">0.96 
(0.94-0.98)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Low quality</td><td align="left" valign="top">6</td><td align="left" valign="top">91 (86-94)</td><td align="left" valign="top">42.73</td><td align="left" valign="top">93 (87-96)</td><td align="left" valign="top">72.3</td><td align="left" valign="top"/><td align="left" valign="top">0.96 (0.94-0.98)</td></tr><tr><td align="left" valign="top" colspan="7"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Algorithm type</td><td align="left" valign="top">.01</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>ML</td><td align="left" valign="top">15</td><td align="left" valign="top">89 (86-90)</td><td align="left" valign="top">1.61</td><td align="left" valign="top">91 (88-93)</td><td align="left" valign="top">45.64</td><td align="left" valign="top"/><td align="left" valign="top">0.94 (0.91-0.96)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>DL</td><td align="left" valign="top">12</td><td align="left" valign="top">91 (89-93)</td><td align="left" valign="top">55.23</td><td align="left" valign="top">94 (93-96)</td><td align="left" valign="top">45.92</td><td align="left" valign="top"/><td align="left" valign="top">0.98 (0.96-0.99)</td></tr><tr><td align="left" valign="top" colspan="6"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Validation strategy</td><td align="left" valign="top"/><td 
align="left" valign="top">.26</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>Internal validation</td><td align="left" valign="top">24</td><td align="left" valign="top">90 (89-92)</td><td align="left" valign="top">34.61</td><td align="left" valign="top">93 (91-94)</td><td align="left" valign="top">64.9</td><td align="left" valign="top"/><td align="left" valign="top">0.96 (0.94-0.97)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>External validation</td><td align="left" valign="top">3</td><td align="left" valign="top">87 (81-93)</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top">95 (91-99)</td><td align="left" valign="top">&#x2014;</td><td align="left" valign="top"/><td align="left" valign="top">&#x2014;</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>SVM</td><td align="left" valign="top">11</td><td align="left" valign="top">90 (87-92)</td><td align="left" valign="top">0</td><td align="left" valign="top">92 (89-94)</td><td align="left" valign="top">37.5</td><td align="left" valign="top"/><td align="left" valign="top">0.94 (0.92-0.96)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>CNN</td><td align="left" valign="top">8</td><td align="left" valign="top">91 (88-94)</td><td align="left" valign="top">69.76</td><td 
align="left" valign="top">93 (92-95)</td><td align="left" valign="top">34.63</td><td align="left" valign="top"/><td align="left" valign="top">0.97 (0.95-0.98)</td></tr></tbody></table><table-wrap-foot><fn id="table2fn1"><p><sup>a</sup>AUC: area under the receiver operating characteristic curve.</p></fn><fn id="table2fn2"><p><sup>b</sup>sMRI: structural magnetic resonance imaging.</p></fn><fn id="table2fn3"><p><sup>c</sup>Not applicable.</p></fn><fn id="table2fn4"><p><sup>d</sup>ML: machine learning.</p></fn><fn id="table2fn5"><p><sup>e</sup>DL: deep learning.</p></fn><fn id="table2fn6"><p><sup>f</sup>SVM: support vector machine.</p></fn><fn id="table2fn7"><p><sup>g</sup>CNN: convolutional neural network.</p></fn><fn id="table2fn8"><p><sup>h</sup><sup>18</sup>F-FDG PET: fluorine-18 fluorodeoxyglucose positron emission tomograph.</p></fn></table-wrap-foot></table-wrap></sec><sec id="s3-4-2"><title>Moderate-to-High-Quality Study Subgroup Analysis</title><p>For sMRI, pooled sensitivity, specificity, and SROC-AUC were 87% (95% CI 85%&#x2010;89%), 91% (95% CI 89%&#x2010;93%), and 0.94 (95% CI 0.92&#x2010;0.96), respectively (see <xref ref-type="fig" rid="figure2">Figures 2</xref> and <xref ref-type="fig" rid="figure3">3</xref>). Stratification by algorithm type indicated that ML models achieved 86% (95% CI 84%&#x2010;88%) sensitivity, 89% (95% CI 86%&#x2010;92%) specificity, and an SROC-AUC of 0.89 (95% CI 0.86&#x2010;0.92), while DL models demonstrated 88% (95% CI 86%&#x2010;91%) sensitivity, 92% (95% CI 90%&#x2010;94%) specificity, and an SROC-AUC of 0.96 (95% CI 0.94&#x2010;0.97).</p><p>For <sup>18</sup>F-FDG PET, pooled sensitivity, specificity, and SROC-AUC were 90% (95% CI 88%&#x2010;92%), 93% (95% CI 91%&#x2010;94%), and 0.96 (95% CI 0.94&#x2010;0.98), respectively (see <xref ref-type="fig" rid="figure4">Figures 4</xref> and <xref ref-type="fig" rid="figure5">5</xref>). 
Subgroup analyses revealed ML models achieved 89% (95% CI 86&#x2010;91%) sensitivity, 91% (95% CI 87%&#x2010;93%) specificity, and an SROC-AUC of 0.95 (95% CI 0.92&#x2010;0.96), whereas DL models outperformed with 91% (95% CI 89%&#x2010;93%) sensitivity, 94% (95% CI 93%&#x2010;96%) specificity, and SROC-AUC 0.97 (95% CI 0.96&#x2010;0.99). The data analysis results are also shown in <xref ref-type="table" rid="table3">Table 3</xref>.</p><fig position="float" id="figure2"><label>Figure 2.</label><caption><p>The pooled sensitivities and specificities based on moderate-to-high-quality studies for structural magnetic resonance imaging [<xref ref-type="bibr" rid="ref60">23</xref>,<xref ref-type="bibr" rid="ref57">24</xref>,<xref ref-type="bibr" rid="ref55">25</xref>,<xref ref-type="bibr" rid="ref53">26</xref>,<xref ref-type="bibr" rid="ref49">27</xref>,<xref ref-type="bibr" rid="ref48">34</xref>,<xref ref-type="bibr" rid="ref47">41</xref>,<xref ref-type="bibr" rid="ref46">43</xref>,<xref ref-type="bibr" rid="ref45">45</xref>,<xref ref-type="bibr" rid="ref43">46</xref>,<xref ref-type="bibr" rid="ref41">47</xref>,<xref ref-type="bibr" rid="ref34">48</xref>,<xref ref-type="bibr" rid="ref27">49</xref>,<xref ref-type="bibr" rid="ref26">53</xref>,<xref ref-type="bibr" rid="ref25">55</xref>,<xref ref-type="bibr" rid="ref24">57</xref>,<xref ref-type="bibr" rid="ref23">60</xref>].</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="aging_v8i1e76981_fig02.png"/></fig><fig position="float" id="figure3"><label>Figure 3.</label><caption><p>Summary receiver operating characteristic (SROC) based on moderate-to-high-quality studies curves for structural magnetic resonance imaging. 
AUC: area under the receiver operating characteristic curve area; SENS: sensitivity; SPEC: specificity.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="aging_v8i1e76981_fig03.png"/></fig><fig position="float" id="figure4"><label>Figure 4.</label><caption><p>The pooled sensitivities and specificities based on moderate-to-high-quality studies for fluorine-18 fluorodeoxyglucose positron emission tomography (<sup>18</sup>F-FDG PET) [<xref ref-type="bibr" rid="ref50">23</xref>,<xref ref-type="bibr" rid="ref49">24</xref>,<xref ref-type="bibr" rid="ref46">27</xref>,<xref ref-type="bibr" rid="ref45">29</xref>,<xref ref-type="bibr" rid="ref45">31</xref>,<xref ref-type="bibr" rid="ref44">33</xref>,<xref ref-type="bibr" rid="ref43">35</xref>,<xref ref-type="bibr" rid="ref42">38</xref>,<xref ref-type="bibr" rid="ref41">39</xref>,<xref ref-type="bibr" rid="ref39">41</xref>,<xref ref-type="bibr" rid="ref38">42</xref>,<xref ref-type="bibr" rid="ref35">43</xref>,<xref ref-type="bibr" rid="ref33">44</xref>,<xref ref-type="bibr" rid="ref31">45</xref>,<xref ref-type="bibr" rid="ref29">45</xref>,<xref ref-type="bibr" rid="ref27">46</xref>,<xref ref-type="bibr" rid="ref24">49</xref>,<xref ref-type="bibr" rid="ref23">50</xref>].</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="aging_v8i1e76981_fig04.png"/></fig><fig position="float" id="figure5"><label>Figure 5.</label><caption><p>Summary receiver operating characteristic (SROC) based on moderate-to-high-quality studies curves for fluorine-18 fluorodeoxyglucose positron emission tomography (<sup>18</sup>F-FDG PET). 
AUC: area under the receiver operating characteristic curve; SENS: sensitivity; SPEC: specificity.</p></caption><graphic alt-version="no" mimetype="image" position="float" xlink:type="simple" xlink:href="aging_v8i1e76981_fig05.png"/></fig><table-wrap id="t3" position="float"><label>Table 3.</label><caption><p>Summary estimates and meta-regression of pooled performance of moderate-to-high-quality studies.</p></caption><table id="table3" frame="hsides" rules="groups"><thead><tr><td align="left" valign="bottom" colspan="2">Main and subordinate directory</td><td align="left" valign="bottom">Tables, n</td><td align="left" valign="bottom">Sensitivity (%; 95% CI)</td><td align="left" valign="bottom"><italic>I</italic><sup>2</sup></td><td align="left" valign="bottom">Specificity (%; 95% CI)</td><td align="left" valign="bottom"><italic>I</italic><sup>2</sup></td><td align="left" valign="bottom">Joint <italic>P</italic> value</td><td align="left" valign="bottom">AUC<sup><xref ref-type="table-fn" rid="table3fn1">a</xref></sup> (95% CI)</td></tr></thead><tbody><tr><td align="left" valign="top" colspan="7">Overall</td><td align="left" valign="top">.02</td><td align="left" valign="top"/></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>sMRI<sup><xref ref-type="table-fn" rid="table3fn2">b</xref></sup></td><td align="char" char="." valign="top">24</td><td align="char" char="." valign="top">87 (85-97)</td><td align="char" char="." valign="top">57.99</td><td align="char" char="." valign="top">91 (89-93)</td><td align="char" char="." valign="top">75.94</td><td align="char" char="." valign="top"/><td align="char" char="." 
valign="top">0.94 (0.92-0.96)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content><sup>18</sup>F-FDG PET<sup><xref ref-type="table-fn" rid="table3fn3">c</xref></sup></td><td align="char" char="." valign="top">21</td><td align="char" char="." valign="top">90 (88-92)</td><td align="char" char="." valign="top">44.58</td><td align="char" char="." valign="top">93 (91-94)</td><td align="char" char="." valign="top">58.52</td><td align="left" valign="top"/><td align="char" char="." valign="top">0.96 (0.94-0.98)</td></tr><tr><td align="left" valign="top" colspan="9"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>ML<sup><xref ref-type="table-fn" rid="table3fn4">d</xref></sup></td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content><named-content content-type="indent">&#x2003;sMRI</named-content></td><td align="char" char="." valign="top">14</td><td align="char" char="." valign="top">86 (84-88)</td><td align="char" char="." valign="top">3.72</td><td align="char" char="." valign="top">89 (86-92)</td><td align="char" char="." valign="top">75.58</td><td align="left" valign="top"/><td align="char" char="." valign="top">0.89 (0.86-0.92)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content><sup>18</sup>F-FDG PET</td><td align="char" char="." valign="top">10</td><td align="char" char="." valign="top">89 (86-91)</td><td align="char" char="." valign="top">16.51</td><td align="char" char="." valign="top">91 (87-93)</td><td align="char" char="." valign="top">53.95</td><td align="left" valign="top"/><td align="char" char="." 
valign="top">0.95 (0.92-0.96)</td></tr><tr><td align="left" valign="top" colspan="9"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content>DL<sup><xref ref-type="table-fn" rid="table3fn5">e</xref></sup></td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content><named-content content-type="indent">&#x2003;sMRI</named-content></td><td align="char" char="." valign="top">10</td><td align="char" char="." valign="top">88 (86-91)</td><td align="char" char="." valign="top">76.56</td><td align="char" char="." valign="top">92 (90-94)</td><td align="char" char="." valign="top">76.53</td><td align="left" valign="top"/><td align="char" char="." valign="top">0.96 (0.94-0.97)</td></tr><tr><td align="left" valign="top" colspan="2"><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content><named-content content-type="indent">&#x00A0;&#x00A0;&#x00A0;&#x00A0;</named-content><sup>18</sup>F-FDG PET</td><td align="char" char="." valign="top">11</td><td align="char" char="." valign="top">91 (89-93)</td><td align="char" char="." valign="top">54.59</td><td align="char" char="." valign="top">94 (93-95)</td><td align="char" char="." valign="top">36.23</td><td align="left" valign="top"/><td align="char" char="." 
valign="top">0.97 (0.96-0.99)</td></tr></tbody></table><table-wrap-foot><fn id="table3fn1"><p><sup>a</sup>AUC: area under the receiver operating characteristic curve.</p></fn><fn id="table3fn2"><p><sup>b</sup>sMRI: structural magnetic resonance imaging.</p></fn><fn id="table3fn3"><p><sup>c</sup><sup>18</sup>F-FDG PET: fluorine-18 fluorodeoxyglucose positron emission tomography.</p></fn><fn id="table3fn4"><p><sup>d</sup>ML: machine learning.</p></fn><fn id="table3fn5"><p><sup>e</sup>DL: deep learning.</p></fn></table-wrap-foot></table-wrap></sec></sec><sec id="s3-5"><title>Exploration of Statistical Heterogeneity Sources</title><sec id="s3-5-1"><title>Full Study Cohort</title><p>For sMRI contingency tables, sensitivity exhibited moderate heterogeneity (<italic>I</italic>&#x00B2;=55.32%), while specificity showed high heterogeneity (<italic>I</italic>&#x00B2;=74.08%). Threshold effect testing revealed no significant threshold effect (<italic>r</italic>=&#x2212;0.101, <italic>P</italic>=.60). Subgroup analyses identified study quality stratification (<italic>P</italic>=.02) as a source of heterogeneity, with moderate-to-high-quality studies demonstrating lower sensitivity (87% vs 91%) and specificity (91% vs 95%) compared to low-quality studies. Algorithm type (<italic>P</italic>=.40) and validation strategy (<italic>P</italic>=.33) were not significant contributors.</p><p>For <sup>18</sup>F-FDG PET analyses, sensitivity and specificity displayed moderate (<italic>I</italic>&#x00B2;=42.04%) and high heterogeneity (<italic>I</italic>&#x00B2;=61.49%), respectively, with no threshold effect (<italic>r</italic>=&#x2212;0.087, <italic>P</italic>=.67). Algorithm type (<italic>P</italic>=.01) significantly influenced heterogeneity, as ML models demonstrated lower sensitivity (89% vs 91%) and specificity (91% vs 94%) than DL. Study quality (<italic>P</italic>=.96) and validation strategy (<italic>P</italic>=.26) showed no significant impact. 
The data analysis results are also shown in <xref ref-type="table" rid="table2">Table 2</xref>.</p></sec><sec id="s3-5-2"><title>Moderate-to-High-Quality Study Subgroups</title><p>For sMRI-based models, ML implementations demonstrated minimal sensitivity heterogeneity (<italic>I</italic>&#x00B2;=3.72%) but high specificity heterogeneity (<italic>I</italic>&#x00B2;=75.58%). Exclusion of studies using traditional ensemble algorithms (random forest, boosting) reduced specificity heterogeneity to moderate (<italic>I</italic>&#x00B2;=45.47%), revealing that ensemble methods achieved higher sensitivity (89% vs 87%) but significantly lower specificity (79% vs 93%) compared to nonensemble ML models (<italic>P</italic>&#x003C;.001). In contrast, DL-based sMRI models exhibited high heterogeneity for both sensitivity (<italic>I</italic>&#x00B2;=76.56%) and specificity (<italic>I</italic>&#x00B2;=76.53%). Removing external validation data mitigated heterogeneity to moderate (<italic>I</italic>&#x00B2;=54.36%) and low levels (<italic>I</italic>&#x00B2;=38.60%), respectively, with external validation studies showing significantly reduced sensitivity (85% vs 90%) and specificity (91% vs 93%) compared to internal validation (<italic>P</italic>&#x003C;.001).</p><p>For <sup>18</sup>F-FDG PET-based models, ML implementations showed low sensitivity heterogeneity (<italic>I</italic>&#x00B2;=16.51%) and moderate specificity heterogeneity (<italic>I</italic>&#x00B2;=53.95%). DL models exhibited moderate sensitivity heterogeneity (<italic>I</italic>&#x00B2;=54.59%) and low specificity heterogeneity (<italic>I</italic>&#x00B2;=36.23%). 
The data analysis results also are shown in <xref ref-type="table" rid="table3">Table 3</xref> and Table S5 in <xref ref-type="supplementary-material" rid="app1">Multimedia Appendix 1</xref>.</p></sec></sec><sec id="s3-6"><title>Publication Bias Test</title><p>Funnel plot analysis of the full study cohort revealed no significant publication bias for sMRI (<italic>P</italic>=.69), whereas the <sup>18</sup>F-FDG PET subgroup exhibited significant bias (<italic>P</italic>&#x003C;.001). In the moderate-to-high-quality cohort, both sMRI (<italic>P</italic>=.03) and <sup>18</sup>F-FDG PET (<italic>P</italic>=.01) demonstrated publication bias. However, subgroup analyses showed no significant bias for sMRI+ML (<italic>P</italic>=.06), sMRI+DL (<italic>P</italic>=.89), <sup>18</sup>F-FDG PET+ML (<italic>P</italic>=.08), or <sup>18</sup>F-FDG PET+DL (<italic>P</italic>=.28).</p></sec></sec><sec id="s4" sec-type="discussion"><title>Discussion</title><sec id="s4-1"><title>Principal Findings</title><p>This meta-analysis confirms that AI-enhanced sMRI and <sup>18</sup>F-FDG PET achieve high diagnostic accuracy for AD, with pooled SROC-AUCs of 0.94 and 0.96, respectively, outperforming conventional visual assessments [<xref ref-type="bibr" rid="ref61">61</xref>,<xref ref-type="bibr" rid="ref62">62</xref>]. <sup>18</sup>F-FDG PET demonstrated superior overall performance (<italic>P</italic>=.02), likely attributable to its sensitivity to AD-specific metabolic abnormalities. Notably, ML amplified PET&#x2019;s diagnostic advantage (SROC-AUC: 0.95 vs 0.89), while DL narrowed the gap (SROC-AUC: 0.97 vs 0.96), highlighting DL&#x2019;s capacity to extract complex metabolic features and mitigate structural imaging limitations [<xref ref-type="bibr" rid="ref63">63</xref>].</p><p>Subgroup analyses identified three key determinants of heterogeneity and performance variation. 
First, methodological quality significantly influenced sMRI models: moderate-to-high-quality studies reported lower sensitivity (87% vs 91%) and specificity (91% vs 95%) compared to low-quality studies (<italic>P</italic>=.02), suggesting inflated performance in the latter due to small sample sizes, single-center data, or nonstandardized preprocessing. Adherence to TRIPOD-AI guidelines and transparent reporting of preprocessing workflows are critical for future studies [<xref ref-type="bibr" rid="ref18">18</xref>]. Second, algorithm type drove technical divergence in PET models: DL outperformed ML (sensitivity: 91% vs 89%; specificity: 94% vs 91%; <italic>P</italic>=.01) through end-to-end feature learning. However, ML&#x2019;s interpretability better aligns with clinical demands for transparency, whereas DL&#x2019;s reliance on large annotated datasets and computing resources limits its deployment in resource-constrained regions [<xref ref-type="bibr" rid="ref64">64</xref>]. Third, validation strategies exposed generalizability limitations: external validation of sMRI+DL models showed reduced sensitivity (85% vs 90%) and specificity (91% vs 93%) compared to internal validation (<italic>P</italic>&#x003C;.001).</p><p>The reduced diagnostic accuracy observed in externally validated models may stem from (1) data distribution shift&#x2014;differences in feature distributions, class balance, and temporal trends between training and external datasets; (2) overfitting&#x2014;models may have overfitted to training-specific noise or spurious patterns, limiting generalization; and (3) implementation and annotation inconsistencies&#x2014;variations in data preprocessing, feature scaling, and labeling protocols across datasets [<xref ref-type="bibr" rid="ref65">65</xref>].</p></sec><sec id="s4-2"><title>Future Directions and Study Limitations</title><p>This study has two methodological constraints: potential selection bias from English-only inclusion and possible 
overestimation by prioritizing optimal contingency tables. Future research should focus on enhancing evidence robustness through multinational, multiethnic cohorts; improving transparency via open-source preprocessing codes, findable, accessible, interoperable, and reusable-compliant data sharing, and independent validation; and balancing performance and interpretability via explainable DL frameworks to meet clinical ethical standards [<xref ref-type="bibr" rid="ref66">66</xref>].</p><p>We also acknowledge the risk of publication bias, as studies with positive results are more likely to be published, particularly in AI-related research where rapid progress and selective reporting may skew the literature. Additionally, language and geographic biases may exist since only English-language articles were included and most studies originated from a limited number of regions (eg, China, the United States, and Europe). This may limit the generalizability of our findings to underrepresented regions.</p><p>Despite <sup>18</sup>F-FDG PET&#x2019;s higher accuracy, its radiation exposure and cost hinder widespread screening [<xref ref-type="bibr" rid="ref67">67</xref>]. Conversely, sMRI&#x2019;s cost-effectiveness and noninvasiveness position it as a first-line screening tool, with PET reserved for confirmatory testing in complex cases&#x2014;a tiered diagnostic strategy. The 2023 Responsible AI for Social and Ethical Healthcare consensus outlines implementation priorities, including transparent cost-benefit frameworks, cross-modal standardization, and dynamic performance monitoring [<xref ref-type="bibr" rid="ref68">68</xref>].</p></sec><sec id="s4-3"><title>Comparison With Existing Reviews</title><p>Although previous reviews have explored the diagnostic accuracy of AI in AD, studies involving meta-analyses remain scarce [<xref ref-type="bibr" rid="ref69">69</xref>-<xref ref-type="bibr" rid="ref71">71</xref>]. 
Conducting such meta-analyses faces significant challenges, particularly due to substantial methodological heterogeneity across studies. This heterogeneity manifests in multiple dimensions, including variations in neuroimaging modalities, disparities in model validation strategies, and differences in algorithm types&#x2014;all of which influence AD diagnostic performance and complicate the synthesis of evidence.</p><p>Borchert et al [<xref ref-type="bibr" rid="ref70">70</xref>] conducted a comprehensive systematic review of 255 neuroimaging studies utilizing AI for dementia diagnosis and prognosis. Their findings demonstrated that discriminative models, particularly DL approaches, outperformed algorithmic classifiers in distinguishing AD patients from healthy controls. However, they emphasized critical methodological limitations, with conclusions primarily relying on qualitative synthesis rather than quantitative evidence.</p><p>In a systematic review and meta-analysis by Sun et al [<xref ref-type="bibr" rid="ref71">71</xref>], the diagnostic accuracy of DL models based on <sup>18</sup>F-FDG PET for AD was investigated. While the study reported excellent diagnostic performance, notable heterogeneity was observed during meta-analysis, raising concerns about the reliability of the findings. Furthermore, the study focused exclusively on DL, overlooking the widespread application of traditional ML methods in current clinical research for AD diagnostic modeling.</p><p>In contrast, our meta-analysis incorporates a broader range of studies, rigorously controls methodological heterogeneity through stringent quality assessment and detailed subgroup analyses, and systematically evaluates the diagnostic accuracy of both ML and DL in AD. 
By emphasizing methodological rigor and the importance of external validation in AI-assisted neuroimaging for AD diagnosis, this study addresses critical gaps in the existing literature.</p><p>Although our subgroup comparisons between ML and DL models provide a useful overview of broad methodological trends, it must be noted that algorithm complexity, training data size, and model optimization procedures vary considerably within each group. The observed performance differences may, therefore, reflect not only model class but also differences in dataset size, feature representation, and implementation quality. Future studies should aim to compare individual algorithms under standardized conditions.</p></sec><sec id="s4-4"><title>Conclusions</title><p>In conclusion, AI can effectively support the diagnosis of AD using sMRI and <sup>18</sup>F-FDG PET imaging. Among these approaches, combining PET imaging with DL techniques yields the highest diagnostic accuracy. These findings suggest that a future direction lies in integrating precision neuroimaging with AI tools. 
To bring such systems into routine clinical use&#x2014;helping doctors detect AD earlier, personalize treatment, and improve patient outcomes&#x2014;future studies should focus on repeated validation with high-quality clinical datasets and the development of standardized implementation protocols.</p></sec></sec></body><back><ack><p>This research was funded by the Henan Province Medical Science and Technology Public Relations Plan Joint Construction Project (LHGJ20230402).</p></ack><fn-group><fn fn-type="conflict"><p>None declared.</p></fn></fn-group><glossary><title>Abbreviations</title><def-list><def-item><term id="abb1"><sup>18</sup>FDG-PET</term><def><p>fluorine-18 fluorodeoxyglucose positron emission tomography</p></def></def-item><def-item><term id="abb2">AD</term><def><p>Alzheimer disease</p></def></def-item><def-item><term id="abb3">AI</term><def><p>artificial intelligence</p></def></def-item><def-item><term id="abb4">DL</term><def><p>deep learning</p></def></def-item><def-item><term id="abb5">MeSH</term><def><p>Medical Subject Headings</p></def></def-item><def-item><term id="abb6">ML</term><def><p>machine learning</p></def></def-item><def-item><term id="abb7">PICOS</term><def><p>Population, Intervention, Comparison, Outcome, Study Design</p></def></def-item><def-item><term id="abb8">PRISMA-DTA</term><def><p>Preferred Reporting Items for Systematic Reviews and Meta-Analyses of Diagnostic Test Accuracy</p></def></def-item><def-item><term id="abb9">sMRI</term><def><p>structural magnetic resonance imaging</p></def></def-item><def-item><term id="abb10">SROC-AUC</term><def><p>summary receiver operating characteristic curve area</p></def></def-item><def-item><term id="abb11">TRIPOD-AI</term><def><p>Transparent Reporting of a Multivariable Prediction Model for Individual Prognosis or Diagnosis&#x2013;Artificial Intelligence</p></def></def-item></def-list></glossary><ref-list><title>References</title><ref id="ref1"><label>1</label><nlm-citation 
citation-type="journal"><article-title>2023 Alzheimer&#x2019;s disease facts and figures</article-title><source>Alzheimers Dement</source><year>2023</year><month>04</month><volume>19</volume><issue>4</issue><fpage>1598</fpage><lpage>1695</lpage><pub-id pub-id-type="doi">10.1002/alz.13016</pub-id></nlm-citation></ref><ref id="ref2"><label>2</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ren</surname><given-names>R</given-names> </name><name name-style="western"><surname>Qi</surname><given-names>J</given-names> </name><name name-style="western"><surname>Lin</surname><given-names>S</given-names> </name><etal/></person-group><article-title>The China Alzheimer Report 2022</article-title><source>Gen Psychiatr</source><year>2022</year><volume>35</volume><issue>1</issue><fpage>e100751</fpage><pub-id pub-id-type="doi">10.1136/gpsych-2022-100751</pub-id><pub-id pub-id-type="medline">35372787</pub-id></nlm-citation></ref><ref id="ref3"><label>3</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Jia</surname><given-names>J</given-names> </name><name name-style="western"><surname>Wei</surname><given-names>C</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>S</given-names> </name><etal/></person-group><article-title>The cost of Alzheimer&#x2019;s disease in China and re-estimation of costs worldwide</article-title><source>Alzheimers Dement</source><year>2018</year><month>04</month><volume>14</volume><issue>4</issue><fpage>483</fpage><lpage>491</lpage><pub-id pub-id-type="doi">10.1016/j.jalz.2017.12.006</pub-id><pub-id pub-id-type="medline">29433981</pub-id></nlm-citation></ref><ref id="ref4"><label>4</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Scheltens</surname><given-names>P</given-names> </name><name 
name-style="western"><surname>Blennow</surname><given-names>K</given-names> </name><name name-style="western"><surname>Breteler</surname><given-names>MMB</given-names> </name><etal/></person-group><article-title>Alzheimer&#x2019;s disease</article-title><source>Lancet</source><year>2016</year><month>07</month><day>30</day><volume>388</volume><issue>10043</issue><fpage>505</fpage><lpage>517</lpage><pub-id pub-id-type="doi">10.1016/S0140-6736(15)01124-1</pub-id><pub-id pub-id-type="medline">26921134</pub-id></nlm-citation></ref><ref id="ref5"><label>5</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Pickett</surname><given-names>AC</given-names> </name><name name-style="western"><surname>Valdez</surname><given-names>D</given-names> </name><name name-style="western"><surname>White</surname><given-names>LA</given-names> </name><etal/></person-group><article-title>The CareVirtue digital journal for family and friend caregivers of people living with Alzheimer disease and related dementias: exploratory topic modeling and user engagement study</article-title><source>JMIR Aging</source><year>2024</year><month>12</month><day>24</day><volume>7</volume><fpage>e67992</fpage><pub-id pub-id-type="doi">10.2196/67992</pub-id><pub-id pub-id-type="medline">39719081</pub-id></nlm-citation></ref><ref id="ref6"><label>6</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cahill</surname><given-names>S</given-names> </name></person-group><article-title>WHO&#x2019;s global action plan on the public health response to dementia: some challenges and opportunities</article-title><source>Aging Ment Health</source><year>2020</year><month>02</month><volume>24</volume><issue>2</issue><fpage>197</fpage><lpage>199</lpage><pub-id pub-id-type="doi">10.1080/13607863.2018.1544213</pub-id><pub-id pub-id-type="medline">30600688</pub-id></nlm-citation></ref><ref 
id="ref7"><label>7</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wu</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Fu</surname><given-names>L</given-names> </name><name name-style="western"><surname>Li</surname><given-names>Q</given-names> </name><etal/></person-group><article-title>Recent advancements in the early diagnosis and treatment of Alzheimer&#x2019;s disease</article-title><source>Adv Therap</source><year>2023</year><month>11</month><volume>6</volume><issue>11</issue><comment><ext-link ext-link-type="uri" xlink:href="https://onlinelibrary.wiley.com/toc/23663987/6/11">https://onlinelibrary.wiley.com/toc/23663987/6/11</ext-link></comment><pub-id pub-id-type="doi">10.1002/adtp.202300181</pub-id><pub-id pub-id-type="medline">36818419</pub-id></nlm-citation></ref><ref id="ref8"><label>8</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Jack</surname><given-names>CR</given-names> </name><name name-style="western"><surname>Bennett</surname><given-names>DA</given-names> </name><name name-style="western"><surname>Blennow</surname><given-names>K</given-names> </name><etal/></person-group><article-title>NIA&#x2010;AA Research Framework: toward a biological definition of Alzheimer&#x2019;s disease</article-title><source>Alzheimer&#x2019;s &#x0026; Dementia</source><year>2018</year><month>04</month><volume>14</volume><issue>4</issue><fpage>535</fpage><lpage>562</lpage><pub-id pub-id-type="doi">10.1016/j.jalz.2018.02.018</pub-id></nlm-citation></ref><ref id="ref9"><label>9</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Jack</surname><given-names>CR</given-names> </name><name name-style="western"><surname>Andrews</surname><given-names>JS</given-names> </name><name 
name-style="western"><surname>Beach</surname><given-names>TG</given-names> </name><etal/></person-group><article-title>Revised criteria for diagnosis and staging of Alzheimer&#x2019;s disease: Alzheimer&#x2019;s Association Workgroup</article-title><source>Alzheimer&#x2019;s &#x0026; Dementia</source><year>2024</year><month>08</month><volume>20</volume><issue>8</issue><fpage>5143</fpage><lpage>5169</lpage><pub-id pub-id-type="doi">10.1002/alz.13859</pub-id></nlm-citation></ref><ref id="ref10"><label>10</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>By</surname><given-names>S</given-names> </name><name name-style="western"><surname>Kahl</surname><given-names>A</given-names> </name><name name-style="western"><surname>Cogswell</surname><given-names>PM</given-names> </name></person-group><article-title>Alzheimer&#x2019;s disease clinical trials: what have we learned from magnetic resonance imaging</article-title><source>J Magn Reson Imaging</source><year>2025</year><month>02</month><volume>61</volume><issue>2</issue><fpage>579</fpage><lpage>594</lpage><pub-id pub-id-type="doi">10.1002/jmri.29462</pub-id><pub-id pub-id-type="medline">39031716</pub-id></nlm-citation></ref><ref id="ref11"><label>11</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kuo</surname><given-names>RYL</given-names> </name><name name-style="western"><surname>Harrison</surname><given-names>C</given-names> </name><name name-style="western"><surname>Curran</surname><given-names>TA</given-names> </name><etal/></person-group><article-title>Artificial intelligence in fracture detection: a systematic review and meta-analysis</article-title><source>Radiology</source><year>2022</year><month>07</month><volume>304</volume><issue>1</issue><fpage>50</fpage><lpage>62</lpage><pub-id pub-id-type="doi">10.1148/radiol.211785</pub-id><pub-id 
pub-id-type="medline">35348381</pub-id></nlm-citation></ref><ref id="ref12"><label>12</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Wang</surname><given-names>J</given-names> </name><name name-style="western"><surname>Xue</surname><given-names>L</given-names> </name><name name-style="western"><surname>Jiang</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Diagnostic performance of artificial intelligence-assisted PET imaging for Parkinson&#x2019;s disease: a systematic review and meta-analysis</article-title><source>NPJ Digit Med</source><year>2024</year><month>01</month><day>22</day><volume>7</volume><issue>1</issue><fpage>17</fpage><pub-id pub-id-type="doi">10.1038/s41746-024-01012-z</pub-id><pub-id pub-id-type="medline">38253738</pub-id></nlm-citation></ref><ref id="ref13"><label>13</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cheng</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Malekar</surname><given-names>M</given-names> </name><name name-style="western"><surname>He</surname><given-names>Y</given-names> </name><etal/></person-group><article-title>High-throughput phenotyping of the symptoms of Alzheimer disease and related dementias using large language models: cross-sectional study</article-title><source>JMIR AI</source><year>2025</year><month>06</month><day>3</day><volume>4</volume><fpage>e66926</fpage><pub-id pub-id-type="doi">10.2196/66926</pub-id><pub-id pub-id-type="medline">40460418</pub-id></nlm-citation></ref><ref id="ref14"><label>14</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Bosco</surname><given-names>C</given-names> </name><name name-style="western"><surname>Shojaei</surname><given-names>F</given-names> </name><name 
name-style="western"><surname>Theisz</surname><given-names>AA</given-names> </name><etal/></person-group><article-title>Testing 3 modalities (voice assistant, chatbot, and mobile app) to assist older African American and Black adults in seeking information on Alzheimer disease and related dementias: wizard of Oz usability study</article-title><source>JMIR Form Res</source><year>2024</year><month>12</month><day>9</day><volume>8</volume><fpage>e60650</fpage><pub-id pub-id-type="doi">10.2196/60650</pub-id><pub-id pub-id-type="medline">39653372</pub-id></nlm-citation></ref><ref id="ref15"><label>15</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rudroff</surname><given-names>T</given-names> </name><name name-style="western"><surname>Rainio</surname><given-names>O</given-names> </name><name name-style="western"><surname>Kl&#x00E9;n</surname><given-names>R</given-names> </name></person-group><article-title>AI for the prediction of early stages of Alzheimer&#x2019;s disease from neuroimaging biomarkers&#x2014;a narrative review of a growing field</article-title><source>Neurol Sci</source><year>2024</year><month>11</month><volume>45</volume><issue>11</issue><fpage>5117</fpage><lpage>5127</lpage><pub-id pub-id-type="doi">10.1007/s10072-024-07649-8</pub-id><pub-id pub-id-type="medline">38866971</pub-id></nlm-citation></ref><ref id="ref16"><label>16</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Jackson</surname><given-names>D</given-names> </name><name name-style="western"><surname>Turner</surname><given-names>R</given-names> </name></person-group><article-title>Power analysis for random-effects meta-analysis</article-title><source>Res Synth Methods</source><year>2017</year><month>09</month><volume>8</volume><issue>3</issue><fpage>290</fpage><lpage>302</lpage><pub-id pub-id-type="doi">10.1002/jrsm.1240</pub-id><pub-id 
pub-id-type="medline">28378395</pub-id></nlm-citation></ref><ref id="ref17"><label>17</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>McInnes</surname><given-names>MDF</given-names> </name><name name-style="western"><surname>Moher</surname><given-names>D</given-names> </name><name name-style="western"><surname>Thombs</surname><given-names>BD</given-names> </name><etal/></person-group><article-title>Preferred reporting items for a systematic review and meta-analysis of diagnostic test accuracy studies: the PRISMA-DTA statement</article-title><source>JAMA</source><year>2018</year><month>01</month><day>23</day><volume>319</volume><issue>4</issue><fpage>388</fpage><lpage>396</lpage><pub-id pub-id-type="doi">10.1001/jama.2017.19163</pub-id><pub-id pub-id-type="medline">29362800</pub-id></nlm-citation></ref><ref id="ref18"><label>18</label><nlm-citation citation-type="journal"><article-title>TRIPOD+AI statement: updated guidance for reporting clinical prediction models that use regression or machine learning methods</article-title><source>BMJ</source><year>2024</year><month>04</month><fpage>q902</fpage><pub-id pub-id-type="doi">10.1136/bmj.q902</pub-id></nlm-citation></ref><ref id="ref19"><label>19</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Singh</surname><given-names>S</given-names> </name><name name-style="western"><surname>Chang</surname><given-names>SM</given-names> </name><name name-style="western"><surname>Matchar</surname><given-names>DB</given-names> </name><name name-style="western"><surname>Bass</surname><given-names>EB</given-names> </name></person-group><article-title>Chapter 7: grading a body of evidence on diagnostic tests</article-title><source>J Gen Intern Med</source><year>2012</year><month>06</month><volume>27 Suppl 1</volume><issue>Suppl 1</issue><fpage>S47</fpage><lpage>55</lpage><pub-id 
pub-id-type="doi">10.1007/s11606-012-2021-9</pub-id><pub-id pub-id-type="medline">22648675</pub-id></nlm-citation></ref><ref id="ref20"><label>20</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ruppar</surname><given-names>T</given-names> </name></person-group><article-title>Meta-analysis: how to quantify and explain heterogeneity?</article-title><source>Eur J Cardiovasc Nurs</source><year>2020</year><month>10</month><volume>19</volume><issue>7</issue><fpage>646</fpage><lpage>652</lpage><pub-id pub-id-type="doi">10.1177/1474515120944014</pub-id><pub-id pub-id-type="medline">32757621</pub-id></nlm-citation></ref><ref id="ref21"><label>21</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Rutter</surname><given-names>CM</given-names> </name><name name-style="western"><surname>Gatsonis</surname><given-names>CA</given-names> </name></person-group><article-title>A hierarchical regression approach to meta-analysis of diagnostic test accuracy evaluations</article-title><source>Stat Med</source><year>2001</year><month>10</month><day>15</day><volume>20</volume><issue>19</issue><fpage>2865</fpage><lpage>2884</lpage><pub-id pub-id-type="doi">10.1002/sim.942</pub-id><pub-id pub-id-type="medline">11568945</pub-id></nlm-citation></ref><ref id="ref22"><label>22</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Reitsma</surname><given-names>JB</given-names> </name><name name-style="western"><surname>Glas</surname><given-names>AS</given-names> </name><name name-style="western"><surname>Rutjes</surname><given-names>AWS</given-names> </name><name name-style="western"><surname>Scholten</surname><given-names>RJPM</given-names> </name><name name-style="western"><surname>Bossuyt</surname><given-names>PM</given-names> </name><name 
name-style="western"><surname>Zwinderman</surname><given-names>AH</given-names> </name></person-group><article-title>Bivariate analysis of sensitivity and specificity produces informative summary measures in diagnostic reviews</article-title><source>J Clin Epidemiol</source><year>2005</year><month>10</month><volume>58</volume><issue>10</issue><fpage>982</fpage><lpage>990</lpage><pub-id pub-id-type="doi">10.1016/j.jclinepi.2005.02.022</pub-id><pub-id pub-id-type="medline">16168343</pub-id></nlm-citation></ref><ref id="ref23"><label>23</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Zhang</surname><given-names>D</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Zhou</surname><given-names>L</given-names> </name><name name-style="western"><surname>Yuan</surname><given-names>H</given-names> </name><name name-style="western"><surname>Shen</surname><given-names>D</given-names> </name></person-group><article-title>Multimodal classification of Alzheimer&#x2019;s disease and mild cognitive impairment</article-title><source>Neuroimage</source><year>2011</year><month>04</month><day>1</day><volume>55</volume><issue>3</issue><fpage>856</fpage><lpage>867</lpage><pub-id pub-id-type="doi">10.1016/j.neuroimage.2011.01.008</pub-id><pub-id pub-id-type="medline">21236349</pub-id></nlm-citation></ref><ref id="ref24"><label>24</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yun</surname><given-names>HJ</given-names> </name><name name-style="western"><surname>Kwak</surname><given-names>K</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>JM</given-names> </name><collab>Alzheimer&#x2019;s Disease Neuroimaging Initiative</collab></person-group><article-title>Multimodal discrimination of Alzheimer&#x2019;s disease based on 
regional cortical atrophy and hypometabolism</article-title><source>PLoS ONE</source><year>2015</year><volume>10</volume><issue>6</issue><fpage>e0129250</fpage><pub-id pub-id-type="doi">10.1371/journal.pone.0129250</pub-id><pub-id pub-id-type="medline">26061669</pub-id></nlm-citation></ref><ref id="ref25"><label>25</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Westman</surname><given-names>E</given-names> </name><name name-style="western"><surname>Muehlboeck</surname><given-names>JS</given-names> </name><name name-style="western"><surname>Simmons</surname><given-names>A</given-names> </name></person-group><article-title>Combining MRI and CSF measures for classification of Alzheimer&#x2019;s disease and prediction of mild cognitive impairment conversion</article-title><source>Neuroimage</source><year>2012</year><month>08</month><day>1</day><volume>62</volume><issue>1</issue><fpage>229</fpage><lpage>238</lpage><pub-id pub-id-type="doi">10.1016/j.neuroimage.2012.04.056</pub-id><pub-id pub-id-type="medline">22580170</pub-id></nlm-citation></ref><ref id="ref26"><label>26</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Vemuri</surname><given-names>P</given-names> </name><name name-style="western"><surname>Gunter</surname><given-names>JL</given-names> </name><name name-style="western"><surname>Senjem</surname><given-names>ML</given-names> </name><etal/></person-group><article-title>Alzheimer&#x2019;s disease diagnosis in individual subjects using structural MR images: validation studies</article-title><source>Neuroimage</source><year>2008</year><month>02</month><day>1</day><volume>39</volume><issue>3</issue><fpage>1186</fpage><lpage>1197</lpage><pub-id pub-id-type="doi">10.1016/j.neuroimage.2007.09.073</pub-id><pub-id pub-id-type="medline">18054253</pub-id></nlm-citation></ref><ref id="ref27"><label>27</label><nlm-citation 
citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Suk</surname><given-names>HI</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>SW</given-names> </name><name name-style="western"><surname>Shen</surname><given-names>D</given-names> </name></person-group><article-title>Hierarchical feature representation and multimodal fusion with deep learning for AD/MCI diagnosis</article-title><source>Neuroimage</source><year>2014</year><month>11</month><day>1</day><volume>101</volume><fpage>569</fpage><lpage>582</lpage><pub-id pub-id-type="doi">10.1016/j.neuroimage.2014.06.077</pub-id><pub-id pub-id-type="medline">25042445</pub-id></nlm-citation></ref><ref id="ref28"><label>28</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sayeed</surname><given-names>A</given-names> </name><name name-style="western"><surname>Petrou</surname><given-names>M</given-names> </name><name name-style="western"><surname>Spyrou</surname><given-names>N</given-names> </name><name name-style="western"><surname>Kadyrov</surname><given-names>A</given-names> </name><name name-style="western"><surname>Spinks</surname><given-names>T</given-names> </name></person-group><article-title>Diagnostic features of Alzheimer&#x2019;s disease extracted from PET sinograms</article-title><source>Phys Med Biol</source><year>2002</year><month>01</month><day>7</day><volume>47</volume><issue>1</issue><fpage>137</fpage><lpage>148</lpage><pub-id pub-id-type="doi">10.1088/0031-9155/47/1/310</pub-id><pub-id pub-id-type="medline">11814222</pub-id></nlm-citation></ref><ref id="ref29"><label>29</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Pan</surname><given-names>X</given-names> </name><name name-style="western"><surname>Adel</surname><given-names>M</given-names> </name><name 
name-style="western"><surname>Fossati</surname><given-names>C</given-names> </name><etal/></person-group><article-title>Multiscale spatial gradient features for <sup>18</sup>F-FDG PET image-guided diagnosis of Alzheimer&#x2019;s disease</article-title><source>Comput Methods Programs Biomed</source><year>2019</year><month>10</month><volume>180</volume><fpage>105027</fpage><pub-id pub-id-type="doi">10.1016/j.cmpb.2019.105027</pub-id><pub-id pub-id-type="medline">31430595</pub-id></nlm-citation></ref><ref id="ref30"><label>30</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Padilla</surname><given-names>P</given-names> </name><name name-style="western"><surname>L&#x00F3;pez</surname><given-names>M</given-names> </name><name name-style="western"><surname>G&#x00F3;rriz</surname><given-names>JM</given-names> </name><etal/></person-group><article-title>NMF-SVM based CAD tool applied to functional brain images for the diagnosis of Alzheimer&#x2019;s disease</article-title><source>IEEE Trans Med Imaging</source><year>2012</year><month>02</month><volume>31</volume><issue>2</issue><fpage>207</fpage><lpage>216</lpage><pub-id pub-id-type="doi">10.1109/TMI.2011.2167628</pub-id><pub-id pub-id-type="medline">21914569</pub-id></nlm-citation></ref><ref id="ref31"><label>31</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ni</surname><given-names>YC</given-names> </name><name name-style="western"><surname>Tseng</surname><given-names>FP</given-names> </name><name name-style="western"><surname>Pai</surname><given-names>MC</given-names> </name><etal/></person-group><article-title>Detection of Alzheimer&#x2019;s disease using ECD SPECT images by transfer learning from FDG PET</article-title><source>Ann Nucl Med</source><year>2021</year><month>08</month><volume>35</volume><issue>8</issue><fpage>889</fpage><lpage>899</lpage><pub-id 
pub-id-type="doi">10.1007/s12149-021-01626-3</pub-id><pub-id pub-id-type="medline">34076857</pub-id></nlm-citation></ref><ref id="ref32"><label>32</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Magnin</surname><given-names>B</given-names> </name><name name-style="western"><surname>Mesrob</surname><given-names>L</given-names> </name><name name-style="western"><surname>Kinkingn&#x00E9;hun</surname><given-names>S</given-names> </name><etal/></person-group><article-title>Support vector machine-based classification of Alzheimer&#x2019;s disease from whole-brain anatomical MRI</article-title><source>Neuroradiology</source><year>2009</year><month>02</month><volume>51</volume><issue>2</issue><fpage>73</fpage><lpage>83</lpage><pub-id pub-id-type="doi">10.1007/s00234-008-0463-x</pub-id><pub-id pub-id-type="medline">18846369</pub-id></nlm-citation></ref><ref id="ref33"><label>33</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lu</surname><given-names>DH</given-names> </name><name name-style="western"><surname>Popuri</surname><given-names>K</given-names> </name><name name-style="western"><surname>Ding</surname><given-names>GW</given-names> </name><name name-style="western"><surname>Balachandar</surname><given-names>R</given-names> </name><name name-style="western"><surname>Beg</surname><given-names>MF</given-names> </name></person-group><article-title>Multiscale deep neural network based analysis of FDG-PET images for the early diagnosis of Alzheimer&#x2019;s disease</article-title><source>Med Image Anal</source><year>2018</year><month>05</month><volume>46</volume><fpage>26</fpage><lpage>34</lpage><pub-id pub-id-type="doi">10.1016/j.media.2018.02.002</pub-id></nlm-citation></ref><ref id="ref34"><label>34</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Liu</surname><given-names>MH</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>DQ</given-names> </name><name name-style="western"><surname>Shen</surname><given-names>DG</given-names> </name></person-group><article-title>Ensemble sparse classification of Alzheimer&#x2019;s disease</article-title><source>Neuroimage</source><year>2012</year><month>04</month><volume>60</volume><issue>2</issue><fpage>1106</fpage><lpage>1116</lpage><pub-id pub-id-type="doi">10.1016/j.neuroimage.2012.01.055</pub-id></nlm-citation></ref><ref id="ref35"><label>35</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Liu</surname><given-names>M</given-names> </name><name name-style="western"><surname>Cheng</surname><given-names>D</given-names> </name><name name-style="western"><surname>Yan</surname><given-names>W</given-names> </name><collab>Alzheimer&#x2019;s Disease Neuroimaging Initiative</collab></person-group><article-title>Classification of Alzheimer&#x2019;s disease by combination of convolutional and recurrent neural networks using FDG-PET images</article-title><source>Front Neuroinform</source><year>2018</year><volume>12</volume><fpage>35</fpage><pub-id pub-id-type="doi">10.3389/fninf.2018.00035</pub-id><pub-id pub-id-type="medline">29970996</pub-id></nlm-citation></ref><ref id="ref36"><label>36</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Li</surname><given-names>R</given-names> </name><name name-style="western"><surname>Perneczky</surname><given-names>R</given-names> </name><name name-style="western"><surname>Yakushev</surname><given-names>I</given-names> </name><etal/></person-group><article-title>Gaussian mixture models and model selection for [18F] fluorodeoxyglucose positron emission tomography classification in Alzheimer&#x2019;s disease</article-title><source>PLoS 
ONE</source><year>2015</year><month>04</month><volume>10</volume><issue>4</issue><fpage>e0122731</fpage><pub-id pub-id-type="doi">10.1371/journal.pone.0122731</pub-id></nlm-citation></ref><ref id="ref37"><label>37</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Lerch</surname><given-names>JP</given-names> </name><name name-style="western"><surname>Pruessner</surname><given-names>J</given-names> </name><name name-style="western"><surname>Zijdenbos</surname><given-names>AP</given-names> </name><etal/></person-group><article-title>Automated cortical thickness measurements from MRI can accurately separate Alzheimer&#x2019;s patients from normal elderly controls</article-title><source>Neurobiol Aging</source><year>2008</year><month>01</month><volume>29</volume><issue>1</issue><fpage>23</fpage><lpage>30</lpage><pub-id pub-id-type="doi">10.1016/j.neurobiolaging.2006.09.013</pub-id><pub-id pub-id-type="medline">17097767</pub-id></nlm-citation></ref><ref id="ref38"><label>38</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kim</surname><given-names>HW</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>HE</given-names> </name><name name-style="western"><surname>Oh</surname><given-names>K</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>S</given-names> </name><name name-style="western"><surname>Yun</surname><given-names>M</given-names> </name><name name-style="western"><surname>Yoo</surname><given-names>SK</given-names> </name></person-group><article-title>Multi-slice representational learning of convolutional neural network for Alzheimer&#x2019;s disease classification using positron emission tomography</article-title><source>Biomed Eng Online</source><year>2020</year><month>09</month><day>7</day><volume>19</volume><issue>1</issue><fpage>70</fpage><pub-id 
pub-id-type="doi">10.1186/s12938-020-00813-z</pub-id><pub-id pub-id-type="medline">32894137</pub-id></nlm-citation></ref><ref id="ref39"><label>39</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Kim</surname><given-names>HW</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>HE</given-names> </name><name name-style="western"><surname>Lee</surname><given-names>S</given-names> </name><name name-style="western"><surname>Oh</surname><given-names>KT</given-names> </name><name name-style="western"><surname>Yun</surname><given-names>M</given-names> </name><name name-style="western"><surname>Yoo</surname><given-names>SK</given-names> </name></person-group><article-title>Slice-selective learning for Alzheimer&#x2019;s disease classification using a generative adversarial network: a feasibility study of external validation</article-title><source>Eur J Nucl Med Mol Imaging</source><year>2020</year><month>08</month><volume>47</volume><issue>9</issue><fpage>2197</fpage><lpage>2206</lpage><pub-id pub-id-type="doi">10.1007/s00259-019-04676-y</pub-id><pub-id pub-id-type="medline">31980910</pub-id></nlm-citation></ref><ref id="ref40"><label>40</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Katako</surname><given-names>A</given-names> </name><name name-style="western"><surname>Shelton</surname><given-names>P</given-names> </name><name name-style="western"><surname>Goertzen</surname><given-names>AL</given-names> </name><etal/></person-group><article-title>Machine learning identified an Alzheimer&#x2019;s disease-related FDG-PET pattern which is also expressed in Lewy body dementia and Parkinson&#x2019;s disease dementia</article-title><source>Sci Rep</source><year>2018</year><month>09</month><day>5</day><volume>8</volume><issue>1</issue><fpage>13236</fpage><pub-id 
pub-id-type="doi">10.1038/s41598-018-31653-6</pub-id><pub-id pub-id-type="medline">30185806</pub-id></nlm-citation></ref><ref id="ref41"><label>41</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ismail</surname><given-names>WN</given-names> </name><name name-style="western"><surname>P. P.</surname><given-names>FR</given-names> </name><name name-style="western"><surname>Ali</surname><given-names>MAS</given-names> </name></person-group><article-title>A meta-heuristic multi-objective optimization method for Alzheimer&#x2019;s disease detection based on multi-modal data</article-title><source>Mathematics</source><year>2023</year><month>02</month><volume>11</volume><issue>4</issue><fpage>957</fpage><pub-id pub-id-type="doi">10.3390/math11040957</pub-id></nlm-citation></ref><ref id="ref42"><label>42</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Ill&#x00E1;n</surname><given-names>IA</given-names> </name><name name-style="western"><surname>G&#x00F3;rriz</surname><given-names>JM</given-names> </name><name name-style="western"><surname>Ram&#x00ED;rez</surname><given-names>J</given-names> </name><etal/></person-group><article-title>18F-FDG PET imaging analysis for computer aided Alzheimer&#x2019;s diagnosis</article-title><source>Inf Sci (Ny)</source><year>2011</year><month>02</month><volume>181</volume><issue>4</issue><fpage>903</fpage><lpage>916</lpage><pub-id pub-id-type="doi">10.1016/j.ins.2010.10.027</pub-id></nlm-citation></ref><ref id="ref43"><label>43</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Hinrichs</surname><given-names>C</given-names> </name><name name-style="western"><surname>Singh</surname><given-names>V</given-names> </name><name name-style="western"><surname>Mukherjee</surname><given-names>L</given-names> 
</name><etal/></person-group><article-title>Spatially augmented LPboosting for AD classification with evaluations on the ADNI dataset</article-title><source>Neuroimage</source><year>2009</year><month>10</month><day>15</day><volume>48</volume><issue>1</issue><fpage>138</fpage><lpage>149</lpage><pub-id pub-id-type="doi">10.1016/j.neuroimage.2009.05.056</pub-id><pub-id pub-id-type="medline">19481161</pub-id></nlm-citation></ref><ref id="ref44"><label>44</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gray</surname><given-names>KR</given-names> </name><name name-style="western"><surname>Wolz</surname><given-names>R</given-names> </name><name name-style="western"><surname>Heckemann</surname><given-names>RA</given-names> </name><etal/></person-group><article-title>Multi-region analysis of longitudinal FDG-PET for the classification of Alzheimer&#x2019;s disease</article-title><source>Neuroimage</source><year>2012</year><month>03</month><volume>60</volume><issue>1</issue><fpage>221</fpage><lpage>229</lpage><pub-id pub-id-type="doi">10.1016/j.neuroimage.2011.12.071</pub-id><pub-id pub-id-type="medline">22236449</pub-id></nlm-citation></ref><ref id="ref45"><label>45</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Gray</surname><given-names>KR</given-names> </name><name name-style="western"><surname>Aljabar</surname><given-names>P</given-names> </name><name name-style="western"><surname>Heckemann</surname><given-names>RA</given-names> </name><name name-style="western"><surname>Hammers</surname><given-names>A</given-names> </name><name name-style="western"><surname>Rueckert</surname><given-names>D</given-names> </name></person-group><article-title>Random forest-based similarity measures for multi-modal classification of Alzheimer&#x2019;s 
disease</article-title><source>Neuroimage</source><year>2013</year><month>01</month><day>15</day><volume>65</volume><fpage>167</fpage><lpage>175</lpage><pub-id pub-id-type="doi">10.1016/j.neuroimage.2012.09.065</pub-id><pub-id pub-id-type="medline">23041336</pub-id></nlm-citation></ref><ref id="ref46"><label>46</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Feng</surname><given-names>C</given-names> </name><name name-style="western"><surname>Elazab</surname><given-names>A</given-names> </name><name name-style="western"><surname>Yang</surname><given-names>P</given-names> </name><etal/></person-group><article-title>Deep learning framework for Alzheimer&#x2019;s disease diagnosis via 3D-CNN and FSBi-LSTM</article-title><source>IEEE Access</source><year>2019</year><volume>7</volume><fpage>63605</fpage><lpage>63618</lpage><pub-id pub-id-type="doi">10.1109/ACCESS.2019.2913847</pub-id></nlm-citation></ref><ref id="ref47"><label>47</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cuingnet</surname><given-names>R</given-names> </name><name name-style="western"><surname>Gerardin</surname><given-names>E</given-names> </name><name name-style="western"><surname>Tessieras</surname><given-names>J</given-names> </name><etal/></person-group><article-title>Automatic classification of patients with Alzheimer&#x2019;s disease from structural MRI: a comparison of ten methods using the ADNI database</article-title><source>Neuroimage</source><year>2011</year><month>05</month><day>15</day><volume>56</volume><issue>2</issue><fpage>766</fpage><lpage>781</lpage><pub-id pub-id-type="doi">10.1016/j.neuroimage.2010.06.013</pub-id><pub-id pub-id-type="medline">20542124</pub-id></nlm-citation></ref><ref id="ref48"><label>48</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Chen</surname><given-names>L</given-names> </name><name name-style="western"><surname>Qiao</surname><given-names>HZ</given-names> </name><name name-style="western"><surname>Zhu</surname><given-names>F</given-names> </name></person-group><article-title>Alzheimer&#x2019;s disease diagnosis with brain structural MRI using multiview-slice attention and 3D convolution neural network</article-title><source>Front Aging Neurosci</source><year>2022</year><volume>14</volume><fpage>871706</fpage><pub-id pub-id-type="doi">10.3389/fnagi.2022.871706</pub-id><pub-id pub-id-type="medline">35557839</pub-id></nlm-citation></ref><ref id="ref49"><label>49</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Song</surname><given-names>J</given-names> </name><name name-style="western"><surname>Zheng</surname><given-names>J</given-names> </name><name name-style="western"><surname>Li</surname><given-names>P</given-names> </name><name name-style="western"><surname>Lu</surname><given-names>X</given-names> </name><name name-style="western"><surname>Zhu</surname><given-names>G</given-names> </name><name name-style="western"><surname>Shen</surname><given-names>P</given-names> </name></person-group><article-title>An effective multimodal image fusion method using MRI and PET for Alzheimer&#x2019;s disease diagnosis</article-title><source>Front Digit Health</source><year>2021</year><volume>3</volume><fpage>637386</fpage><pub-id pub-id-type="doi">10.3389/fdgth.2021.637386</pub-id><pub-id pub-id-type="medline">34713109</pub-id></nlm-citation></ref><ref id="ref50"><label>50</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Li</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Jiang</surname><given-names>J</given-names> </name><name name-style="western"><surname>Lu</surname><given-names>J</given-names> 
</name><name name-style="western"><surname>Jiang</surname><given-names>J</given-names> </name><name name-style="western"><surname>Zhang</surname><given-names>H</given-names> </name><name name-style="western"><surname>Zuo</surname><given-names>C</given-names> </name></person-group><article-title>Radiomics: a novel feature extraction method for brain neuron degeneration disease using <sup>18</sup>F-FDG PET imaging and its implementation for Alzheimer&#x2019;s disease and mild cognitive impairment</article-title><source>Ther Adv Neurol Disord</source><year>2019</year><volume>12</volume><fpage>1756286419838682</fpage><pub-id pub-id-type="doi">10.1177/1756286419838682</pub-id><pub-id pub-id-type="medline">30956687</pub-id></nlm-citation></ref><ref id="ref51"><label>51</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>A</surname><given-names>A</given-names> </name><name name-style="western"><surname>M</surname><given-names>P</given-names> </name><name name-style="western"><surname>Hamdi</surname><given-names>M</given-names> </name><name name-style="western"><surname>Bourouis</surname><given-names>S</given-names> </name><name name-style="western"><surname>Rastislav</surname><given-names>K</given-names> </name><name name-style="western"><surname>Mohmed</surname><given-names>F</given-names> </name></person-group><article-title>Evaluation of neuro images for the diagnosis of Alzheimer&#x2019;s disease using deep learning neural network</article-title><source>Front Public Health</source><year>2022</year><volume>10</volume><fpage>834032</fpage><pub-id pub-id-type="doi">10.3389/fpubh.2022.834032</pub-id><pub-id pub-id-type="medline">35198526</pub-id></nlm-citation></ref><ref id="ref52"><label>52</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Toussaint</surname><given-names>PJ</given-names> </name><name 
name-style="western"><surname>Perlbarg</surname><given-names>V</given-names> </name><name name-style="western"><surname>Bellec</surname><given-names>P</given-names> </name><etal/></person-group><article-title>Resting state FDG-PET functional connectivity as an early biomarker of Alzheimer&#x2019;s disease using conjoint univariate and independent component analyses</article-title><source>Neuroimage</source><year>2012</year><month>11</month><day>1</day><volume>63</volume><issue>2</issue><fpage>936</fpage><lpage>946</lpage><pub-id pub-id-type="doi">10.1016/j.neuroimage.2012.03.091</pub-id><pub-id pub-id-type="medline">22510256</pub-id></nlm-citation></ref><ref id="ref53"><label>53</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Tong</surname><given-names>T</given-names> </name><name name-style="western"><surname>Wolz</surname><given-names>R</given-names> </name><name name-style="western"><surname>Gao</surname><given-names>Q</given-names> </name><etal/></person-group><article-title>Multiple instance learning for classification of dementia in brain MRI</article-title><source>Med Image Anal</source><year>2014</year><month>07</month><volume>18</volume><issue>5</issue><fpage>808</fpage><lpage>818</lpage><pub-id pub-id-type="doi">10.1016/j.media.2014.04.006</pub-id><pub-id pub-id-type="medline">24858570</pub-id></nlm-citation></ref><ref id="ref54"><label>54</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Min</surname><given-names>R</given-names> </name><name name-style="western"><surname>Wu</surname><given-names>G</given-names> </name><name name-style="western"><surname>Cheng</surname><given-names>J</given-names> </name><name name-style="western"><surname>Wang</surname><given-names>Q</given-names> </name><name name-style="western"><surname>Shen</surname><given-names>D</given-names> 
</name></person-group><article-title>Multi-atlas based representations for Alzheimer&#x2019;s disease diagnosis</article-title><source>Hum Brain Mapp</source><year>2014</year><month>10</month><volume>35</volume><issue>10</issue><fpage>5052</fpage><lpage>5070</lpage><pub-id pub-id-type="doi">10.1002/hbm.22531</pub-id><pub-id pub-id-type="medline">24753060</pub-id></nlm-citation></ref><ref id="ref55"><label>55</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Jin</surname><given-names>D</given-names> </name><name name-style="western"><surname>Zhou</surname><given-names>B</given-names> </name><name name-style="western"><surname>Han</surname><given-names>Y</given-names> </name><etal/></person-group><article-title>Generalizable, reproducible, and neuroscientifically interpretable imaging biomarkers for Alzheimer&#x2019;s disease</article-title><source>Adv Sci (Weinh)</source><year>2020</year><month>07</month><volume>7</volume><issue>14</issue><fpage>2000675</fpage><pub-id pub-id-type="doi">10.1002/advs.202000675</pub-id><pub-id pub-id-type="medline">32714766</pub-id></nlm-citation></ref><ref id="ref56"><label>56</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Cho</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Seong</surname><given-names>JK</given-names> </name><name name-style="western"><surname>Jeong</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Shin</surname><given-names>SY</given-names> </name></person-group><article-title>Individual subject classification for Alzheimer&#x2019;s disease based on incremental learning using a spatial frequency representation of cortical thickness data</article-title><source>Neuroimage</source><year>2012</year><month>02</month><day>1</day><volume>59</volume><issue>3</issue><fpage>2217</fpage><lpage>2230</lpage><pub-id 
pub-id-type="doi">10.1016/j.neuroimage.2011.09.085</pub-id><pub-id pub-id-type="medline">22008371</pub-id></nlm-citation></ref><ref id="ref57"><label>57</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Chincarini</surname><given-names>A</given-names> </name><name name-style="western"><surname>Bosco</surname><given-names>P</given-names> </name><name name-style="western"><surname>Calvini</surname><given-names>P</given-names> </name><etal/></person-group><article-title>Local MRI analysis approach in the diagnosis of early and prodromal Alzheimer&#x2019;s disease</article-title><source>Neuroimage</source><year>2011</year><month>09</month><day>15</day><volume>58</volume><issue>2</issue><fpage>469</fpage><lpage>480</lpage><pub-id pub-id-type="doi">10.1016/j.neuroimage.2011.05.083</pub-id><pub-id pub-id-type="medline">21718788</pub-id></nlm-citation></ref><ref id="ref58"><label>58</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Beheshti</surname><given-names>I</given-names> </name><name name-style="western"><surname>Demirel</surname><given-names>H</given-names> </name><name name-style="western"><surname>Matsuda</surname><given-names>H</given-names> </name></person-group><article-title>Classification of Alzheimer&#x2019;s disease and prediction of mild cognitive impairment-to-Alzheimer&#x2019;s conversion from structural magnetic resource imaging using feature ranking and a genetic algorithm</article-title><source>Comput Biol Med</source><year>2017</year><month>04</month><day>1</day><volume>83</volume><fpage>109</fpage><lpage>119</lpage><pub-id pub-id-type="doi">10.1016/j.compbiomed.2017.02.011</pub-id><pub-id pub-id-type="medline">28260614</pub-id></nlm-citation></ref><ref id="ref59"><label>59</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name 
name-style="western"><surname>Anandh</surname><given-names>KR</given-names> </name><name name-style="western"><surname>Sujatha</surname><given-names>CM</given-names> </name><name name-style="western"><surname>Ramakrishnan</surname><given-names>S</given-names> </name></person-group><article-title>A method to differentiate mild cognitive impairment and Alzheimer in MR images using eigen value descriptors</article-title><source>J Med Syst</source><year>2016</year><month>01</month><volume>40</volume><issue>1</issue><fpage>26547845</fpage><pub-id pub-id-type="doi">10.1007/s10916-015-0396-y</pub-id><pub-id pub-id-type="medline">26547845</pub-id></nlm-citation></ref><ref id="ref60"><label>60</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Amoroso</surname><given-names>N</given-names> </name><name name-style="western"><surname>La Rocca</surname><given-names>M</given-names> </name><name name-style="western"><surname>Bruno</surname><given-names>S</given-names> </name><etal/></person-group><article-title>Multiplex networks for early diagnosis of Alzheimer&#x2019;s disease</article-title><source>Front Aging Neurosci</source><year>2018</year><volume>10</volume><fpage>365</fpage><pub-id pub-id-type="doi">10.3389/fnagi.2018.00365</pub-id><pub-id pub-id-type="medline">30487745</pub-id></nlm-citation></ref><ref id="ref61"><label>61</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Yamane</surname><given-names>T</given-names> </name><name name-style="western"><surname>Ikari</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Nishio</surname><given-names>T</given-names> </name><etal/></person-group><article-title>Visual-statistical interpretation of (18)F-FDG-PET images for characteristic Alzheimer patterns in a multicenter study: inter-rater concordance and relationship to automated quantitative 
evaluation</article-title><source>AJNR Am J Neuroradiol</source><year>2014</year><month>02</month><volume>35</volume><issue>2</issue><fpage>244</fpage><lpage>249</lpage><pub-id pub-id-type="doi">10.3174/ajnr.A3665</pub-id><pub-id pub-id-type="medline">23907243</pub-id></nlm-citation></ref><ref id="ref62"><label>62</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Harper</surname><given-names>L</given-names> </name><name name-style="western"><surname>Fumagalli</surname><given-names>GG</given-names> </name><name name-style="western"><surname>Barkhof</surname><given-names>F</given-names> </name><etal/></person-group><article-title>MRI visual rating scales in the diagnosis of dementia: evaluation in 184 post-mortem confirmed cases</article-title><source>Brain</source><year>2016</year><month>04</month><volume>139</volume><issue>Pt 4</issue><fpage>1211</fpage><lpage>1225</lpage><pub-id pub-id-type="doi">10.1093/brain/aww005</pub-id><pub-id pub-id-type="medline">26936938</pub-id></nlm-citation></ref><ref id="ref63"><label>63</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Loddo</surname><given-names>A</given-names> </name><name name-style="western"><surname>Buttau</surname><given-names>S</given-names> </name><name name-style="western"><surname>Di Ruberto</surname><given-names>C</given-names> </name></person-group><article-title>Deep learning based pipelines for Alzheimer&#x2019;s disease diagnosis: a comparative study and a novel deep-ensemble method</article-title><source>Comput Biol Med</source><year>2022</year><month>02</month><volume>141</volume><fpage>105032</fpage><pub-id pub-id-type="doi">10.1016/j.compbiomed.2021.105032</pub-id><pub-id pub-id-type="medline">34838263</pub-id></nlm-citation></ref><ref id="ref64"><label>64</label><nlm-citation citation-type="confproc"><person-group person-group-type="author"><name
name-style="western"><surname>Pawar</surname><given-names>U</given-names> </name><name name-style="western"><surname>O&#x2019;Shea</surname><given-names>D</given-names> </name><name name-style="western"><surname>Rea</surname><given-names>S</given-names> </name><name name-style="western"><surname>O&#x2019;Reilly</surname><given-names>R</given-names> </name></person-group><article-title>Explainable AI in healthcare</article-title><conf-name>2020 International Conference on Cyber Situational Awareness, Data Analytics and Assessment (CyberSA)</conf-name><conf-date>Jun 15-19, 2020</conf-date><conf-loc>Dublin, Ireland</conf-loc><pub-id pub-id-type="doi">10.1109/CyberSA49311.2020.9139655</pub-id></nlm-citation></ref><ref id="ref65"><label>65</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Birkenbihl</surname><given-names>C</given-names> </name><name name-style="western"><surname>Emon</surname><given-names>MA</given-names> </name><name name-style="western"><surname>Vrooman</surname><given-names>H</given-names> </name><etal/></person-group><article-title>Differences in cohort study data affect external validation of artificial intelligence models for predictive diagnostics of dementia - lessons for translation into clinical practice</article-title><source>EPMA J</source><year>2020</year><month>09</month><volume>11</volume><issue>3</issue><fpage>367</fpage><lpage>376</lpage><pub-id pub-id-type="doi">10.1007/s13167-020-00216-z</pub-id><pub-id pub-id-type="medline">32843907</pub-id></nlm-citation></ref><ref id="ref66"><label>66</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Barredo Arrieta</surname><given-names>A</given-names> </name><name name-style="western"><surname>D&#x00ED;az-Rodr&#x00ED;guez</surname><given-names>N</given-names> </name><name name-style="western"><surname>Del Ser</surname><given-names>J</given-names> 
</name><etal/></person-group><article-title>Explainable Artificial Intelligence (XAI): Concepts, taxonomies, opportunities and challenges toward responsible AI</article-title><source>Information Fusion</source><year>2020</year><month>06</month><volume>58</volume><fpage>82</fpage><lpage>115</lpage><pub-id pub-id-type="doi">10.1016/j.inffus.2019.12.012</pub-id></nlm-citation></ref><ref id="ref67"><label>67</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Mayerhoefer</surname><given-names>ME</given-names> </name><name name-style="western"><surname>Prosch</surname><given-names>H</given-names> </name><name name-style="western"><surname>Beer</surname><given-names>L</given-names> </name><etal/></person-group><article-title>PET/MRI versus PET/CT in oncology: a prospective single-center study of 330 examinations focusing on implications for patient management and cost considerations</article-title><source>Eur J Nucl Med Mol Imaging</source><year>2020</year><month>01</month><volume>47</volume><issue>1</issue><fpage>51</fpage><lpage>60</lpage><pub-id pub-id-type="doi">10.1007/s00259-019-04452-y</pub-id><pub-id pub-id-type="medline">31410538</pub-id></nlm-citation></ref><ref id="ref68"><label>68</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Goldberg</surname><given-names>CB</given-names> </name><name name-style="western"><surname>Adams</surname><given-names>L</given-names> </name><name name-style="western"><surname>Blumenthal</surname><given-names>D</given-names> </name><etal/></person-group><article-title>To do no harm - and the most good - with AI in health care</article-title><source>Nat Med</source><year>2024</year><month>03</month><volume>30</volume><issue>3</issue><fpage>623</fpage><lpage>627</lpage><pub-id pub-id-type="doi">10.1038/s41591-024-02853-7</pub-id><pub-id 
pub-id-type="medline">38388841</pub-id></nlm-citation></ref><ref id="ref69"><label>69</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Frizzell</surname><given-names>TO</given-names> </name><name name-style="western"><surname>Glashutter</surname><given-names>M</given-names> </name><name name-style="western"><surname>Liu</surname><given-names>CC</given-names> </name><etal/></person-group><article-title>Artificial intelligence in brain MRI analysis of Alzheimer&#x2019;s disease over the past 12 years: A systematic review</article-title><source>Ageing Res Rev</source><year>2022</year><month>05</month><volume>77</volume><fpage>101614</fpage><pub-id pub-id-type="doi">10.1016/j.arr.2022.101614</pub-id><pub-id pub-id-type="medline">35358720</pub-id></nlm-citation></ref><ref id="ref70"><label>70</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Borchert</surname><given-names>RJ</given-names> </name><name name-style="western"><surname>Azevedo</surname><given-names>T</given-names> </name><name name-style="western"><surname>Badhwar</surname><given-names>A</given-names> </name><etal/></person-group><article-title>Artificial intelligence for diagnostic and prognostic neuroimaging in dementia: A systematic review</article-title><source>Alzheimers Dement</source><year>2023</year><month>12</month><volume>19</volume><issue>12</issue><fpage>5885</fpage><lpage>5904</lpage><pub-id pub-id-type="doi">10.1002/alz.13412</pub-id><pub-id pub-id-type="medline">37563912</pub-id></nlm-citation></ref><ref id="ref71"><label>71</label><nlm-citation citation-type="journal"><person-group person-group-type="author"><name name-style="western"><surname>Sun</surname><given-names>Y</given-names> </name><name name-style="western"><surname>Chen</surname><given-names>Y</given-names> </name><name 
name-style="western"><surname>Dong</surname><given-names>L</given-names> </name><etal/></person-group><article-title>Diagnostic performance of deep learning-assisted [<sup>18</sup>F]FDG PET imaging for Alzheimer&#x2019;s disease: a systematic review and meta-analysis</article-title><source>Eur J Nucl Med Mol Imaging</source><year>2025</year><month>08</month><volume>52</volume><issue>10</issue><fpage>3600</fpage><lpage>3612</lpage><pub-id pub-id-type="doi">10.1007/s00259-025-07228-9</pub-id><pub-id pub-id-type="medline">40159544</pub-id></nlm-citation></ref></ref-list><app-group><supplementary-material id="app1"><label>Multimedia Appendix 1</label><p>Detailed database search process, characteristics of included studies, quality assessment results, and subgroup analysis findings.</p><media xlink:href="aging_v8i1e76981_app1.docx" xlink:title="DOCX File, 59 KB"/></supplementary-material><supplementary-material id="app2"><label>Checklist 1</label><p>PRISMA 2020 checklist.</p><media xlink:href="aging_v8i1e76981_app2.pdf" xlink:title="PDF File, 101 KB"/></supplementary-material></app-group></back></article>