@inproceedings{tang-etal-2018-speeding,
title = "Speeding up Context-based Sentence Representation Learning with Non-autoregressive Convolutional Decoding",
author = "Tang, Shuai and
Jin, Hailin and
Fang, Chen and
Wang, Zhaowen and
de Sa, Virginia",
editor = "Augenstein, Isabelle and
Cao, Kris and
He, He and
Hill, Felix and
Gella, Spandana and
Kiros, Jamie and
Mei, Hongyuan and
Misra, Dipendra",
booktitle = "Proceedings of the Third Workshop on Representation Learning for {NLP}",
month = jul,
year = "2018",
address = "Melbourne, Australia",
publisher = "Association for Computational Linguistics",
url = "https://aclanthology.org/W18-3009",
doi = "10.18653/v1/W18-3009",
pages = "69--78",
abstract = "We propose an asymmetric encoder-decoder structure, which keeps an RNN as the encoder and has a CNN as the decoder, and the model only explores the subsequent context information as the supervision. The asymmetry in both model architecture and training pair reduces a large amount of the training time. The contribution of our work is summarized as 1. We design experiments to show that an autoregressive decoder or an RNN decoder is not necessary for the encoder-decoder type of models in terms of learning sentence representations, and based on our results, we present 2 findings. 2. The two interesting findings lead to our final model design, which has an RNN encoder and a CNN decoder, and it learns to encode the current sentence and decode the subsequent contiguous words all at once. 3. With a suite of techniques, our model performs good on downstream tasks and can be trained efficiently on a large unlabelled corpus.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
<mods ID="tang-etal-2018-speeding">
<titleInfo>
<title>Speeding up Context-based Sentence Representation Learning with Non-autoregressive Convolutional Decoding</title>
</titleInfo>
<name type="personal">
<namePart type="given">Shuai</namePart>
<namePart type="family">Tang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hailin</namePart>
<namePart type="family">Jin</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Chen</namePart>
<namePart type="family">Fang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Zhaowen</namePart>
<namePart type="family">Wang</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Virginia</namePart>
<namePart type="family">de Sa</namePart>
<role>
<roleTerm authority="marcrelator" type="text">author</roleTerm>
</role>
</name>
<originInfo>
<dateIssued>2018-07</dateIssued>
</originInfo>
<typeOfResource>text</typeOfResource>
<relatedItem type="host">
<titleInfo>
<title>Proceedings of the Third Workshop on Representation Learning for NLP</title>
</titleInfo>
<name type="personal">
<namePart type="given">Isabelle</namePart>
<namePart type="family">Augenstein</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Kris</namePart>
<namePart type="family">Cao</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">He</namePart>
<namePart type="family">He</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Felix</namePart>
<namePart type="family">Hill</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Spandana</namePart>
<namePart type="family">Gella</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Jamie</namePart>
<namePart type="family">Kiros</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Hongyuan</namePart>
<namePart type="family">Mei</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<name type="personal">
<namePart type="given">Dipendra</namePart>
<namePart type="family">Misra</namePart>
<role>
<roleTerm authority="marcrelator" type="text">editor</roleTerm>
</role>
</name>
<originInfo>
<publisher>Association for Computational Linguistics</publisher>
<place>
<placeTerm type="text">Melbourne, Australia</placeTerm>
</place>
</originInfo>
<genre authority="marcgt">conference publication</genre>
</relatedItem>
<abstract>We propose an asymmetric encoder-decoder structure, which keeps an RNN as the encoder and uses a CNN as the decoder, and the model uses only the subsequent context as supervision. The asymmetry in both the model architecture and the training pair greatly reduces training time. The contributions of our work are summarized as follows: 1. We design experiments to show that an autoregressive decoder or an RNN decoder is not necessary for encoder-decoder models to learn sentence representations, and based on our results, we present two findings. 2. These two findings lead to our final model design, which has an RNN encoder and a CNN decoder and learns to encode the current sentence and decode the subsequent contiguous words all at once. 3. With a suite of techniques, our model performs well on downstream tasks and can be trained efficiently on a large unlabelled corpus.</abstract>
<identifier type="citekey">tang-etal-2018-speeding</identifier>
<identifier type="doi">10.18653/v1/W18-3009</identifier>
<location>
<url>https://aclanthology.org/W18-3009</url>
</location>
<part>
<date>2018-07</date>
<extent unit="page">
<start>69</start>
<end>78</end>
</extent>
</part>
</mods>
</modsCollection>
%0 Conference Proceedings
%T Speeding up Context-based Sentence Representation Learning with Non-autoregressive Convolutional Decoding
%A Tang, Shuai
%A Jin, Hailin
%A Fang, Chen
%A Wang, Zhaowen
%A de Sa, Virginia
%Y Augenstein, Isabelle
%Y Cao, Kris
%Y He, He
%Y Hill, Felix
%Y Gella, Spandana
%Y Kiros, Jamie
%Y Mei, Hongyuan
%Y Misra, Dipendra
%S Proceedings of the Third Workshop on Representation Learning for NLP
%D 2018
%8 July
%I Association for Computational Linguistics
%C Melbourne, Australia
%F tang-etal-2018-speeding
%X We propose an asymmetric encoder-decoder structure, which keeps an RNN as the encoder and uses a CNN as the decoder, and the model uses only the subsequent context as supervision. The asymmetry in both the model architecture and the training pair greatly reduces training time. The contributions of our work are summarized as follows: 1. We design experiments to show that an autoregressive decoder or an RNN decoder is not necessary for encoder-decoder models to learn sentence representations, and based on our results, we present two findings. 2. These two findings lead to our final model design, which has an RNN encoder and a CNN decoder and learns to encode the current sentence and decode the subsequent contiguous words all at once. 3. With a suite of techniques, our model performs well on downstream tasks and can be trained efficiently on a large unlabelled corpus.
%R 10.18653/v1/W18-3009
%U https://aclanthology.org/W18-3009
%U https://doi.org/10.18653/v1/W18-3009
%P 69-78
Markdown (Informal)
[Speeding up Context-based Sentence Representation Learning with Non-autoregressive Convolutional Decoding](https://aclanthology.org/W18-3009) (Tang et al., RepL4NLP 2018)
ACL
Shuai Tang, Hailin Jin, Chen Fang, Zhaowen Wang, and Virginia de Sa. 2018. [Speeding up Context-based Sentence Representation Learning with Non-autoregressive Convolutional Decoding](https://aclanthology.org/W18-3009). In *Proceedings of the Third Workshop on Representation Learning for NLP*, pages 69–78, Melbourne, Australia. Association for Computational Linguistics.
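
The abstract only names the architecture at a high level, so below is a minimal, hypothetical PyTorch sketch of the asymmetric design it describes: a GRU encoder produces the sentence representation, and a small 1-D CNN decodes the subsequent words non-autoregressively, i.e. all positions at once from the same sentence vector. Every class name, dimension, and layer choice here is an illustrative assumption, not the authors' released implementation.

```python
# Hypothetical sketch of the asymmetric RNN-encoder / CNN-decoder idea
# from the abstract. Names and sizes are assumptions for illustration.
import torch
import torch.nn as nn

class AsymmetricEncoderDecoder(nn.Module):
    def __init__(self, vocab_size, emb_dim=300, hid_dim=600, ctx_len=30):
        super().__init__()
        self.ctx_len = ctx_len  # number of subsequent words predicted at once
        self.embed = nn.Embedding(vocab_size, emb_dim)
        # RNN encoder: its final hidden state is the sentence representation
        # that the training objective is meant to shape.
        self.encoder = nn.GRU(emb_dim, hid_dim, batch_first=True)
        # CNN decoder: 1-D convolutions over the target "time" axis; it sees
        # only the sentence vector, so decoding is non-autoregressive.
        self.decoder = nn.Sequential(
            nn.Conv1d(hid_dim, hid_dim, kernel_size=3, padding=1),
            nn.ReLU(),
            nn.Conv1d(hid_dim, emb_dim, kernel_size=3, padding=1),
        )
        self.out = nn.Linear(emb_dim, vocab_size)

    def forward(self, sent):                   # sent: (batch, src_len) of ids
        _, h = self.encoder(self.embed(sent))  # h: (1, batch, hid_dim)
        z = h[-1]                              # sentence representation
        # Tile the sentence vector across all target positions so every
        # subsequent word is predicted in parallel from the same code.
        dec_in = z.unsqueeze(2).expand(-1, -1, self.ctx_len).contiguous()
        feats = self.decoder(dec_in)           # (batch, emb_dim, ctx_len)
        return self.out(feats.transpose(1, 2)) # (batch, ctx_len, vocab)
```

Under these assumptions, training would apply a per-position cross-entropy loss between the `(batch, ctx_len, vocab)` logits and the next `ctx_len` words following each sentence in an unlabelled corpus; because no decoder step waits on a previously generated token, the whole target window is scored in one forward pass, which is the source of the speed-up the title refers to.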