Skip to content

yhzh05/DFLNet

Repository files navigation

DFLNet

@article{ZHANG2024109281,
title = {Learning to sound imaging by a model-based interpretable network},
journal = {Signal Processing},
volume = {215},
pages = {109281},
year = {2024},
issn = {0165-1684},
doi = {10.1016/j.sigpro.2023.109281},
url = {https://www.sciencedirect.com/science/article/pii/S0165168423003559},
author = {Yonghua Zhang and Xiaotong Tu and Saqlain Abbas and Hao Liang and Yue Huang and Xinghao Ding},
keywords = {Sound localization, Beamforming, Interpretable neural network, Array signal processing},
abstract = {Acoustic beamforming methods based on microphone arrays have been widely used for sound source localization in various industrial fields. The conventional methods such as Delay and Sum (DAS) beamforming are limited by poor spatial resolution although their computational complexity is low. Efforts have been made to improve the resolution of the beamforming map, achieved by both model-based and deep network-based approaches. However, the model-based methods usually suffer from high additional computational effort and largely rely on selecting user-determined parameters. The deep network-based methods may make it difficult to guarantee generalization as well as hard to generate the beamforming map directly. In this paper, we first propose a scheme DAMAS-FISTA-LASSO to solve the inverse problem of sound imaging, significantly reducing runtime requirements. By deep unfolding the proposed algorithm, we further have designed an end-to-end interpretable model-based deep neural network termed DAMAS-FISTA-LASSO-Net (DFLNet) for real-time and high-resolution mapping of acoustic sources, which could obtain satisfying performance on both simulated and real-world data, demonstrating the strong generalization capability of the network.}
}