@inproceedings{linqing-weilei-2023-dynamic,
title = "Dynamic-{FACT}: A Dynamic Framework for Adaptive Context-Aware Translation",
author = "Linqing, Chen and
Weilei, Wang",
editor = "Sun, Maosong and
Qin, Bing and
Qiu, Xipeng and
Jiang, Jing and
Han, Xianpei",
booktitle = "Proceedings of the 22nd Chinese National Conference on Computational Linguistics",
month = aug,
year = "2023",
address = "Harbin, China",
publisher = "Chinese Information Processing Society of China",
url = "https://aclanthology.org/2023.ccl-1.57",
pages = "665--676",
abstract = "{``}Document-level neural machine translation (NMT) has garnered considerable attention sincethe emergence of various context-aware NMT models. However, these static NMT models aretrained on fixed parallel datasets, thus lacking awareness of the target document during infer-ence. In order to alleviate this limitation, we propose a dynamic adapter-translator frameworkfor context-aware NMT, which adapts the trained NMT model to the input document prior totranslation. Specifically, the document adapter reconstructs the scrambled portion of the originaldocument from a deliberately corrupted version, thereby reducing the performance disparity be-tween training and inference. To achieve this, we employ an adaptation process in both the train-ing and inference stages. Our experimental results on document-level translation benchmarksdemonstrate significant enhancements in translation performance, underscoring the necessity ofdynamic adaptation for context-aware translation and the efficacy of our methodologies. Introduction{''}",
language = "English",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="linqing-weilei-2023-dynamic">
<titleInfo>
<title>Dynamic-FACT: A Dynamic Framework for Adaptive Context-Aware Translation</title>
</titleInfo>
<name type="personal">
<namePart type="given">Chen</namePart>
<namePart type="family">Linqing</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Wang</namePart>
<namePart type="family">Weilei</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2023-08</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<language>
<languageTerm type="text">English</languageTerm>
<languageTerm type="code" authority="iso639-2b">eng</languageTerm>
</language>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the 22nd Chinese National Conference on Computational Linguistics</title>
</titleInfo>
<name type="personal">
<namePart type="given">Maosong</namePart>
<namePart type="family">Sun</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Bing</namePart>
<namePart type="family">Qin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Xipeng</namePart>
<namePart type="family">Qiu</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jing</namePart>
<namePart type="family">Jiang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Xianpei</namePart>
<namePart type="family">Han</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Chinese Information Processing Society of China</publisher>
<place>
<placeTerm type="text">Harbin, China</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
  <abstract>“Document-level neural machine translation (NMT) has garnered considerable attention since the emergence of various context-aware NMT models. However, these static NMT models are trained on fixed parallel datasets, thus lacking awareness of the target document during inference. In order to alleviate this limitation, we propose a dynamic adapter-translator framework for context-aware NMT, which adapts the trained NMT model to the input document prior to translation. Specifically, the document adapter reconstructs the scrambled portion of the original document from a deliberately corrupted version, thereby reducing the performance disparity between training and inference. To achieve this, we employ an adaptation process in both the training and inference stages. Our experimental results on document-level translation benchmarks demonstrate significant enhancements in translation performance, underscoring the necessity of dynamic adaptation for context-aware translation and the efficacy of our methodologies.”</abstract>
<identifier type="citekey">linqing-weilei-2023-dynamic</identifier>
<location>
<url>https://aclanthology.org/2023.ccl-1.57</url>
</location>
<part>
<date>2023-08</date>
<extent unit="page">
<start>665</start>
<end>676</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Dynamic-FACT: A Dynamic Framework for Adaptive Context-Aware Translation
%A Linqing, Chen
%A Weilei, Wang
%Y Sun, Maosong
%Y Qin, Bing
%Y Qiu, Xipeng
%Y Jiang, Jing
%Y Han, Xianpei
%S Proceedings of the 22nd Chinese National Conference on Computational Linguistics
%D 2023
%8 August
%I Chinese Information Processing Society of China
%C Harbin, China
%G English
%F linqing-weilei-2023-dynamic
%X “Document-level neural machine translation (NMT) has garnered considerable attention since the emergence of various context-aware NMT models. However, these static NMT models are trained on fixed parallel datasets, thus lacking awareness of the target document during inference. In order to alleviate this limitation, we propose a dynamic adapter-translator framework for context-aware NMT, which adapts the trained NMT model to the input document prior to translation. Specifically, the document adapter reconstructs the scrambled portion of the original document from a deliberately corrupted version, thereby reducing the performance disparity between training and inference. To achieve this, we employ an adaptation process in both the training and inference stages. Our experimental results on document-level translation benchmarks demonstrate significant enhancements in translation performance, underscoring the necessity of dynamic adaptation for context-aware translation and the efficacy of our methodologies.”
%U https://aclanthology.org/2023.ccl-1.57
%P 665-676
Markdown (Informal)
[Dynamic-FACT: A Dynamic Framework for Adaptive Context-Aware Translation](https://aclanthology.org/2023.ccl-1.57) (Linqing & Weilei, CCL 2023)
ACL