@inproceedings{4f2b35be5d78477caceb4fa2d5023d62,
  title     = {Data Selection Curriculum for {Neural Machine Translation}},
  author    = {Mohiuddin, Tasnim and Koehn, Philipp and Chaudhary, Vishrav and Cross, James and Bhosale, Shruti and Joty, Shafiq},
  editor    = {Goldberg, Yoav and Kozareva, Zornitsa and Zhang, Yue},
  booktitle = {Findings of the Association for Computational Linguistics: {EMNLP} 2022},
  publisher = {Association for Computational Linguistics},
  year      = {2022},
  month     = dec,
  pages     = {1569--1582},
  doi       = {10.18653/v1/2022.findings-emnlp.519},
  language  = {English},
  abstract  = {Neural Machine Translation (NMT) models are typically trained on heterogeneous data that are concatenated and randomly shuffled. However, not all of the training data are equally useful to the model. Curriculum training aims to present the data to the NMT models in a meaningful order. In this work, we introduce a two-stage training framework for NMT where we fine-tune a base NMT model on subsets of data, selected by both deterministic scoring using pre-trained methods and online scoring that considers prediction scores of the emerging NMT model. Through comprehensive experiments on six language pairs comprising low- and high-resource languages from WMT'21, we have shown that our curriculum strategies consistently demonstrate better quality (up to +2.2 BLEU improvement) and faster convergence (approximately 50\% fewer updates).},
  note      = {Publisher Copyright: {\textcopyright} 2022 Association for Computational Linguistics.; 2022 Findings of the Association for Computational Linguistics: EMNLP 2022 ; Conference date: 07-12-2022 Through 11-12-2022},
}