@unpublished{barrientos2025developmentevaluationaidriventelemedicine,
title = {Development and Evaluation of an AI-Driven Telemedicine System for Prenatal Healthcare},
author = {Barrientos, Juan and Pérez, Michaelle and González, Douglas and Reyna, Favio and Fajardo, Julio and Lara, Andrea},
eprint = {2510.01194},
doi = {10.48550/arXiv.2510.01194},
year = {2025},
archiveprefix = {arXiv},
primaryclass = {cs.HC},
url = {https://arxiv.org/abs/2510.01194}
}
Access to obstetric ultrasound is often limited in low-resource settings, particularly in rural areas of low- and middle-income countries. This work proposes a human-in-the-loop artificial intelligence (AI) system designed to assist midwives in acquiring diagnostically relevant fetal images using blind sweep protocols. The system incorporates a classification model along with a web-based platform for asynchronous specialist review. By identifying key frames in blind sweep studies, the AI system allows specialists to concentrate on interpretation rather than reviewing entire videos. To evaluate its performance, blind sweep videos captured by a small group of minimally trained midwives using a low-cost Point-of-Care Ultrasound (POCUS) device were analyzed. The system demonstrated promising results in identifying standard fetal planes from sweeps made by non-experts. A field evaluation indicated good usability and a low cognitive workload, suggesting that the system has the potential to expand access to prenatal imaging in underserved regions.
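As a rough illustration of the frame-selection idea described in this abstract, the sketch below scores each frame of a sweep with a classifier and keeps only high-confidence candidates for specialist review. The architecture, plane labels, and threshold are placeholders chosen for the example, not the published system.

```python
# Illustrative sketch only: a frame-level classifier that flags candidate
# standard-plane frames in a blind-sweep video. Architecture, class set,
# and threshold are hypothetical, not the system described in the paper.
import torch
import torch.nn as nn

PLANE_CLASSES = ["background", "head", "abdomen", "femur"]  # assumed labels

class FramePlaneClassifier(nn.Module):
    def __init__(self, num_classes: int = len(PLANE_CLASSES)):
        super().__init__()
        self.features = nn.Sequential(
            nn.Conv2d(1, 16, 3, stride=2, padding=1), nn.ReLU(),
            nn.Conv2d(16, 32, 3, stride=2, padding=1), nn.ReLU(),
            nn.AdaptiveAvgPool2d(1),
        )
        self.head = nn.Linear(32, num_classes)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.head(self.features(x).flatten(1))

def select_key_frames(frames: torch.Tensor, model: nn.Module, threshold: float = 0.8):
    """Return (index, plane, probability) for frames whose most likely
    non-background plane exceeds `threshold`. frames: (T, 1, H, W) in [0, 1]."""
    model.eval()
    with torch.no_grad():
        probs = torch.softmax(model(frames), dim=1)
    best_prob, best_class = probs[:, 1:].max(dim=1)  # ignore background column
    keep = (best_prob >= threshold).nonzero(as_tuple=True)[0]
    return [(int(i), PLANE_CLASSES[int(best_class[i]) + 1], float(best_prob[i])) for i in keep]

# Example: score 120 synthetic frames from one sweep.
if __name__ == "__main__":
    sweep = torch.rand(120, 1, 128, 128)
    print(select_key_frames(sweep, FramePlaneClassifier()))
```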
@article{Lekadire081554,
author = {Lekadir, Karim and Frangi, Alejandro F and Porras, Antonio R and Glocker, Ben and Cintas, Celia and Langlotz, Curtis P and Weicken, Eva and Asselbergs, Folkert W and Prior, Fred and Collins, Gary S and Kaissis, Georgios and Tsakou, Gianna and Buvat, Ir{\`e}ne and Kalpathy-Cramer, Jayashree and Mongan, John and Schnabel, Julia A and Kushibar, Kaisar and Riklund, Katrine and Marias, Kostas and Amugongo, Lameck M and Fromont, Lauren A and Maier-Hein, Lena and Cerd{\'a}-Alberich, Leonor and Mart{\'\i}-Bonmat{\'\i}, Luis and Cardoso, M Jorge and Bobowicz, Maciej and Shabani, Mahsa and Tsiknakis, Manolis and Zuluaga, Maria A and Fritzsche, Marie-Christine and Camacho, Marina and Linguraru, Marius George and Wenzel, Markus and De Bruijne, Marleen and Tolsgaard, Martin G and Goisauf, Melanie and Cano Abad{\'\i}a, M{\'o}nica and Papanikolaou, Nikolaos and Lazrak, Noussair and Pujol, Oriol and Osuala, Richard and Napel, Sandy and Colantonio, Sara and Joshi, Smriti and Klein, Stefan and Auss{\'o}, Susanna and Rogers, Wendy A and Salahuddin, Zohaib and Starmans, Martijn P A},
title = {FUTURE-AI: international consensus guideline for trustworthy and deployable artificial intelligence in healthcare},
volume = {388},
elocation-id = {e081554},
year = {2025},
doi = {10.1136/bmj-2024-081554},
publisher = {BMJ Publishing Group Ltd},
url = {https://www.bmj.com/content/388/bmj-2024-081554},
eprint = {https://www.bmj.com/content/388/bmj-2024-081554.full.pdf},
journal = {BMJ}
}
Despite major advances in artificial intelligence (AI) research for healthcare, the deployment and adoption of AI technologies remain limited in clinical practice. This paper describes the FUTURE-AI framework, which provides guidance for the development and deployment of trustworthy AI tools in healthcare. The FUTURE-AI Consortium was founded in 2021 and comprises 117 interdisciplinary experts from 50 countries representing all continents, including AI scientists, clinical researchers, biomedical ethicists, and social scientists. Over a two-year period, the FUTURE-AI guideline was established through consensus based on six guiding principles: fairness, universality, traceability, usability, robustness, and explainability. To operationalise trustworthy AI in healthcare, a set of 30 best practices was defined, addressing technical, clinical, socioethical, and legal dimensions. The recommendations cover the entire lifecycle of healthcare AI, from design, development, and validation to regulation, deployment, and monitoring.
@article{9918065,
author = {Lara-Hernández, A. and Rienmüller, T. and Juárez, I. and Pérez, M. and Reyna, F. and Baumgartner, D. and Makarenko, V. N. and Bockeria, O. L. and Maksudov, M. and Rienmüller, R. and Baumgartner, C.},
journal = {IEEE Transactions on Medical Imaging},
title = {Deep Learning-Based Image Registration in Dynamic Myocardial Perfusion CT Imaging},
year = {2023},
volume = {42},
number = {3},
pages = {684-696},
keywords = {Computed tomography;Myocardium;Strain;Image registration;Magnetic resonance imaging;Image sequences;Heart;Registration;deep learning;dynamic cardiac imaging;computed tomography;myocardial perfusion},
doi = {10.1109/TMI.2022.3214380}
}
Registration of dynamic CT image sequences is a crucial preprocessing step for the clinical evaluation of multiple physiological determinants in the heart, such as global and regional myocardial perfusion. In this work, we present a deformable deep learning-based image registration method for quantitative myocardial perfusion CT examinations which, in contrast to previous approaches, takes into account several unique challenges: low image quality with less accurate anatomical landmarks, dynamic changes of contrast agent concentration in the heart chambers and tissue, and misalignment caused by cardiac stress, respiration, and patient motion. The introduced method uses a recursive cascade network with a ventricle segmentation module and a novel loss function that accounts for local contrast changes over time. It was trained and validated on a dataset of n = 118 patients with known or suspected coronary artery disease and/or aortic valve insufficiency. Our results demonstrate that the proposed method is capable of registering dynamic cardiac perfusion sequences by reducing local tissue displacements of the left ventricle (LV), while contrast changes do not affect the registration or image quality, in particular the absolute CT (HU) values of the entire CT sequence. In addition, the presented deep learning-based approach achieves a short processing time of a few seconds compared to conventional image registration methods, demonstrating its application potential for quantitative CT myocardial perfusion measurements in daily clinical routine.
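The paper's own contrast-aware loss is not reproduced here. As a stand-in, the sketch below implements a local normalized cross-correlation (LNCC) loss, a standard similarity measure that, like the loss described in the abstract, is less sensitive to local contrast-agent changes than plain intensity differences; the window size and tensor shapes are assumptions.

```python
# Illustrative sketch: local normalized cross-correlation (LNCC) loss, a
# common contrast-robust similarity for deformable registration. This is
# NOT the paper's proposed loss; it only shows why a locally normalized
# measure tolerates contrast changes better than mean-squared error.
import torch
import torch.nn.functional as F

def lncc_loss(fixed: torch.Tensor, moved: torch.Tensor, win: int = 9, eps: float = 1e-5) -> torch.Tensor:
    """fixed, moved: (B, 1, H, W). Returns 1 - mean local NCC (lower is better)."""
    kernel = torch.ones(1, 1, win, win, device=fixed.device) / (win * win)
    mu_f = F.conv2d(fixed, kernel, padding=win // 2)
    mu_m = F.conv2d(moved, kernel, padding=win // 2)
    var_f = F.conv2d(fixed * fixed, kernel, padding=win // 2) - mu_f ** 2
    var_m = F.conv2d(moved * moved, kernel, padding=win // 2) - mu_m ** 2
    cov = F.conv2d(fixed * moved, kernel, padding=win // 2) - mu_f * mu_m
    ncc = cov / torch.sqrt(var_f.clamp(min=eps) * var_m.clamp(min=eps))
    return 1.0 - ncc.mean()

# Example on random images (in practice: a reference CT frame and a warped frame).
if __name__ == "__main__":
    a, b = torch.rand(1, 1, 64, 64), torch.rand(1, 1, 64, 64)
    print(float(lncc_loss(a, b)), float(lncc_loss(a, a)))  # self-similarity loss is close to 0
```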
@article{9626462,
author = {Leitner, Christoph and Jarolim, Robert and Englmair, Bernhard and Kruse, Annika and Hernandez, Karen Andrea Lara and Konrad, Andreas and Su, Eric Yung-Sheng and Schröttner, Jörg and Kelly, Luke A. and Lichtwark, Glen A. and Tilp, Markus and Baumgartner, Christian},
journal = {IEEE Transactions on Biomedical Engineering},
title = {A Human-Centered Machine-Learning Approach for Muscle-Tendon Junction Tracking in Ultrasound Images},
year = {2022},
volume = {69},
number = {6},
pages = {1920-1930},
keywords = {Muscles;Tendons;Training;Junctions;Ultrasonic imaging;Instruments;Videos;Attention mechanism;anatomical landmark detection;convolutional neural network;domain generalization;feature extraction;label noise;locomotion;myotendinous junction;probability map;segmentation;sequential learning;soft labeling;U-net},
doi = {10.1109/TBME.2021.3130548}
}
Biomechanical and clinical gait research observes muscles and tendons in limbs to study their functions and behaviour. Therefore, movements of distinct anatomical landmarks, such as muscle-tendon junctions, are frequently measured. We propose a reliable and time-efficient machine-learning approach to track these junctions in ultrasound videos and support clinical biomechanists in gait analysis. To facilitate this process, a deep-learning-based method was introduced. We gathered an extensive dataset covering 3 functional movements and 2 muscles, collected from 123 healthy and 38 impaired subjects with 3 different ultrasound systems, providing a total of 66864 annotated ultrasound images for network training. Furthermore, we used data collected across independent laboratories and curated by researchers with varying levels of experience. For the evaluation of our method, a diverse test set was selected and independently verified by four specialists. We show that our model achieves performance scores similar to those of the four human specialists in identifying the muscle-tendon junction position. Our method provides time-efficient tracking of muscle-tendon junctions, with prediction times of up to 0.078 seconds per frame (approx. 100 times faster than manual labeling). All our code, trained models, and the test set are publicly available, and our model is provided as a free-to-use online service at https://deepmtj.org/.
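To illustrate how a landmark coordinate can be read out of a network's probability map, as mentioned in this abstract's keywords, the sketch below uses a soft-argmax (probability-weighted centroid). The map size and temperature are assumptions for the example, not the published configuration.

```python
# Illustrative sketch: converting a predicted probability map into a
# muscle-tendon-junction coordinate with a soft-argmax. Parameters are
# assumptions, not the settings used in the paper.
import numpy as np

def soft_argmax_2d(prob_map: np.ndarray, temperature: float = 1.0):
    """prob_map: (H, W) non-negative scores. Returns (row, col) as floats."""
    weights = np.exp(prob_map / temperature)
    weights /= weights.sum()
    rows, cols = np.indices(prob_map.shape)
    return float((weights * rows).sum()), float((weights * cols).sum())

# Example: a synthetic Gaussian blob centred near (40, 25).
if __name__ == "__main__":
    rr, cc = np.indices((64, 64))
    blob = np.exp(-((rr - 40) ** 2 + (cc - 25) ** 2) / (2 * 3.0 ** 2))
    print(soft_argmax_2d(blob, temperature=0.05))  # approximately (40.0, 25.0)
```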
@article{LARAHERNANDEZ2021104200,
title = {Deep learning in spatiotemporal cardiac imaging: A review of methodologies and clinical usability},
journal = {Computers in Biology and Medicine},
volume = {130},
pages = {104200},
year = {2021},
issn = {0010-4825},
doi = {10.1016/j.compbiomed.2020.104200},
url = {https://www.sciencedirect.com/science/article/pii/S001048252030531X},
author = {{Lara Hernandez}, Karen Andrea and Rienmüller, Theresa and Baumgartner, Daniela and Baumgartner, Christian},
keywords = {Deep learning, Cardiovascular imaging, Spatiotemporal image data, Clinical usability}
}
The use of different cardiac imaging modalities such as MRI, CT, or ultrasound enables the visualization and interpretation of altered morphological structures and function of the heart. In recent years, there has been increasing interest in AI and deep learning methods that take spatial and temporal information into account in medical image analysis. In particular, deep learning tools using temporal information in image processing have not yet found their way into daily clinical practice, despite their presumed high diagnostic and prognostic value. This review aims to synthesize the most relevant deep learning methods and discuss their clinical usability in dynamic cardiac imaging, using, for example, the complete spatiotemporal image information of the heart cycle. Selected articles were categorized according to the following indicators: clinical applications, quality of datasets, preprocessing and annotation, learning methods and training strategy, and test performance. Clinical usability was evaluated based on these criteria by classifying the selected papers into (i) clinical level, (ii) robust candidate, and (iii) proof of concept applications. Interestingly, not a single one of the reviewed papers was classified as a “clinical level” study. Almost 39% of the articles achieved a “robust candidate” and as many as 61% a “proof of concept” status. In summary, deep learning in spatiotemporal cardiac imaging is still strongly research-oriented, and its implementation in clinical applications still requires considerable effort. Challenges that need to be addressed are the quality of datasets together with clinical verification and validation of the performance achieved by the used methods.
@inproceedings{10783618,
author = {Illescas, J. and Perez, M. and Fajardo, J. and Lara, A. and Reyna, F.},
booktitle = {2024 20th International Symposium on Medical Information Processing and Analysis (SIPAIM)},
title = {AI-based Automatic Segmentation of Sinonasal Cavity in Cone-Beam Computed Tomography},
year = {2024},
pages = {1-4},
keywords = {Image segmentation;Visualization;Three-dimensional displays;Accuracy;Computed tomography;Noise;Nose;Artificial intelligence;Medical diagnostic imaging;Diseases},
doi = {10.1109/SIPAIM62974.2024.10783618},
month = nov
}
The evaluation of the sinonasal cavity and paranasal sinuses is crucial for diagnosing nasal disturbances and tumors through Cone-Beam Computed Tomography (CBCT). This technology provides 3D volumes for better visualization, helping clinicians rapidly diagnose different diseases. Recent advances in artificial intelligence methodologies applied to these medical images permit rapid and accurate segmentation of the sinonasal cavity in 3D CBCT studies, even when noise characteristics are unknown and difficult to eliminate. This study proposes an efficient AI-based method using a 2D U-Net to rapidly segment the sinonasal cavity in 3D CBCT studies. The method was validated on a dataset of 102 subjects, demonstrating high accuracy (about 99.3%) and high efficiency, performing the segmentation in under 20 seconds.
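The sketch below illustrates the general slice-wise pattern of applying a 2D segmentation network to a 3D CBCT volume and stacking the per-slice masks. The tiny stand-in network and volume sizes are placeholders, not the paper's 2D U-Net or data.

```python
# Illustrative sketch: slice-wise 2D segmentation of a 3D volume. The
# small fully convolutional `net` is a stand-in for the paper's 2D U-Net.
import torch
import torch.nn as nn

def segment_volume(volume: torch.Tensor, net: nn.Module, batch: int = 16) -> torch.Tensor:
    """volume: (D, H, W) intensities in [0, 1]. Returns a (D, H, W) boolean mask."""
    net.eval()
    masks = []
    with torch.no_grad():
        for start in range(0, volume.shape[0], batch):
            slices = volume[start:start + batch].unsqueeze(1)   # (b, 1, H, W)
            logits = net(slices)                                # (b, 1, H, W)
            masks.append((torch.sigmoid(logits) > 0.5).squeeze(1))
    return torch.cat(masks, dim=0)

if __name__ == "__main__":
    # Stand-in network; a real 2D U-Net would replace this.
    net = nn.Sequential(nn.Conv2d(1, 8, 3, padding=1), nn.ReLU(), nn.Conv2d(8, 1, 3, padding=1))
    cbct = torch.rand(40, 96, 96)                               # toy 40-slice volume
    mask = segment_volume(cbct, net)
    print(mask.shape, mask.float().mean())                      # shape and foreground fraction
```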
@inproceedings{10555736,
author = {Fajardo, Julio and González, Ariel and Ordóñez, José Leal and Pérez, Michael and Lara, Andrea},
booktitle = {2024 IEEE Latin American Electron Devices Conference (LAEDC)},
title = {Laser-induced Graphene Electrodes for Urine Osmolarity Estimation through Electrochemical Impedance Spectroscopy},
year = {2024},
pages = {1-4},
keywords = {Electrodes;Fabrication;Electric potential;Sociology;Graphene;Biomedical measurement;Impedance;Laser-induced graphene;electrodes;electrochemical impedance spectroscopy;biomedical sensors},
doi = {10.1109/LAEDC61552.2024.10555736}
}
This study addresses the critical issue of dehydration, particularly in vulnerable populations with limited access to water resources. Given the limitations of current dehydration detection methods, this research proposes a novel approach utilizing cost-effective laser-induced graphene (LIG) electrodes and an AD5940 front-end for electrochemical impedance spectroscopy (EIS) on urine samples. The method aims to provide a reliable and affordable diagnostic tool for at-risk populations by incorporating machine learning algorithms. The experimental results demonstrate the effectiveness of LIG electrodes in EIS analysis, showcasing their potential for preliminary dehydration detection in urine. Furthermore, the study highlights the broader applications of this approach in monitoring various biological processes, suggesting its versatility in addressing health challenges beyond dehydration, such as wound healing, early cancer detection, and impedance-related monitoring in diverse medical contexts. This work presents a promising advancement in continuous monitoring technologies, especially in resource-limited settings, achieving a determination coefficient of about 0.96 for osmolarity estimation with an electrode impedance of about 620±95 Ω.
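The following sketch illustrates, on synthetic data, the general idea of regressing osmolarity from impedance spectra and reporting a determination coefficient. The features, frequencies, regression model, and data below are assumptions for illustration, not the pipeline published in the paper.

```python
# Illustrative sketch: regressing osmolarity from EIS features and
# reporting R^2. All data here are synthetic stand-ins.
import numpy as np
from sklearn.linear_model import Ridge
from sklearn.metrics import r2_score
from sklearn.model_selection import train_test_split

rng = np.random.default_rng(0)
n_samples, n_freqs = 120, 30

# Synthetic |Z| and phase at n_freqs frequencies per sample, with osmolarity
# loosely tied to low-frequency impedance magnitude (an assumed relationship).
magnitude = 620 + 95 * rng.standard_normal((n_samples, n_freqs))
phase = -0.3 + 0.05 * rng.standard_normal((n_samples, n_freqs))
osmolarity = 900 - 0.8 * magnitude[:, :5].mean(axis=1) + 20 * rng.standard_normal(n_samples)

X = np.hstack([magnitude, phase])
X_train, X_test, y_train, y_test = train_test_split(X, osmolarity, test_size=0.25, random_state=0)

model = Ridge(alpha=1.0).fit(X_train, y_train)
print("R^2 on held-out samples:", r2_score(y_test, model.predict(X_test)))
```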
@inproceedings{10373438,
author = {González, A. and Illescas, J. and González, D. and Pérez, M. and Fajardo, J. and Lara, A.},
booktitle = {2023 19th International Symposium on Medical Information Processing and Analysis (SIPAIM)},
title = {Near-Infrared Spectroscopy Driven Human-Machine Interface based on Convolutional Neural Networks},
year = {2023},
pages = {1-4},
keywords = {Human-machine systems;Data acquisition;Neural networks;Muscles;Electromyography;Data models;Hemodynamics;Near-infrared spectroscopy;pattern recognition;hand gesture classification;human-machine interface;convolutional neural networks},
doi = {10.1109/SIPAIM56729.2023.10373438}
}
Human-machine interfaces based on the classification of hand gestures often use electromyography to collect and interpret user intent; however, this methodology, which records the electrical activity produced by skeletal muscles, suffers from several drawbacks that affect its performance. For this reason, other approaches, such as near-infrared spectroscopy, have been studied; they offer advantages over traditional techniques since they are not susceptible to electrical noise or electrode degradation. This work presents a hand gesture classification system for a human-machine interface based on near-infrared spectroscopy and a convolutional neural network that classifies five gestures. The data acquisition system transparently and fluently transfers data representing the hemodynamics of three specific muscle groups to a classification model for five different hand gestures, showing promising results comparable to traditional electromyography-based classification methods.
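A minimal sketch of the kind of model described in this abstract is shown below: a small 1D convolutional network mapping multichannel NIRS windows to five gesture classes. The channel count, window length, and architecture are assumptions, not the published model.

```python
# Illustrative sketch: a small 1D CNN classifying short multichannel NIRS
# windows into five hand gestures. All sizes are assumed for the example.
import torch
import torch.nn as nn

N_CHANNELS, WINDOW, N_GESTURES = 6, 128, 5  # assumed: 3 muscle sites x 2 wavelengths

class NirsGestureCNN(nn.Module):
    def __init__(self):
        super().__init__()
        self.net = nn.Sequential(
            nn.Conv1d(N_CHANNELS, 16, kernel_size=7, padding=3), nn.ReLU(),
            nn.MaxPool1d(2),
            nn.Conv1d(16, 32, kernel_size=5, padding=2), nn.ReLU(),
            nn.AdaptiveAvgPool1d(1),
            nn.Flatten(),
            nn.Linear(32, N_GESTURES),
        )

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return self.net(x)  # x: (batch, N_CHANNELS, WINDOW) -> (batch, N_GESTURES)

if __name__ == "__main__":
    model = NirsGestureCNN()
    window_batch = torch.rand(8, N_CHANNELS, WINDOW)   # toy hemodynamic windows
    print(model(window_batch).argmax(dim=1))            # predicted gesture per window
```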
BiomedLab
Biomedical Engineering Institute
Universidad Galileo
4A Calle 7a. Avenida Zona 10
Guatemala City, GT 01010
© 2026 BiomedLab