@comment{Auto-exported entry, cleaned up: brace-delimited fields, "Last, First"
  author form, consistent ordering/alignment. The opaque hash key is kept
  unchanged so existing \cite references continue to resolve.}
@inproceedings{34e8d6f1c94b49e58cc05eff5fa61773,
  author        = {Hamed, Injy and Hussein, Amir and Chellah, Oumnia and Chowdhury, Shammur and Mubarak, Hamdy and Sitaram, Sunayana and Habash, Nizar and Ali, Ahmed},
  title         = {Benchmarking Evaluation Metrics for Code-Switching Automatic Speech Recognition},
  booktitle     = {2022 IEEE Spoken Language Technology Workshop, SLT 2022 - Proceedings},
  series        = {2022 IEEE Spoken Language Technology Workshop, SLT 2022 - Proceedings},
  pages         = {999--1005},
  publisher     = {Institute of Electrical and Electronics Engineers Inc.},
  address       = {United States},
  year          = {2023},
  doi           = {10.1109/SLT54892.2023.10023181},
  language      = {English},
  keywords      = {ASR, Code-switching, Evaluation metric},
  abstract      = {Code-switching poses a number of challenges and opportunities for multilingual automatic speech recognition. In this paper, we focus on the question of robust and fair evaluation metrics. To that end, we develop a reference benchmark data set of code-switching speech recognition hypotheses with human judgments. We define clear guidelines for minimal editing of automatic hypotheses. We validate the guidelines using 4-way inter-annotator agreement. We evaluate a large number of metrics in terms of correlation with human judgments. The metrics we consider vary in terms of representation (orthographic, phonological, semantic), directness (intrinsic vs extrinsic), granularity (e.g. word, character), and similarity computation method. The highest correlation to human judgment is achieved using transliteration followed by text normalization. We release the first corpus for human acceptance of code-switching speech recognition results in dialectal Arabic/English conversation speech.},
  note          = {Publisher Copyright: {\textcopyright} 2023 IEEE.; 2022 IEEE Spoken Language Technology Workshop, SLT 2022 ; Conference date: 09-01-2023 Through 12-01-2023},
  internal-note = {NOTE(review): address should be the publisher's city, not a country, per BibTeX convention -- confirm and replace with the IEEE publication city if known},
}