@inproceedings{yin-etal-2024-synprompt,
    title = "{S}yn{P}rompt: Syntax-aware Enhanced Prompt Engineering for Aspect-based Sentiment Analysis",
    author = "Yin, Wen and
      Liu, Cencen and
      Xu, Yi and
      Wahla, Ahmad Raza and
      Yiting, Huang and
      Zheng, Dezhang",
    editor = "Calzolari, Nicoletta and
      Kan, Min-Yen and
      Hoste, Veronique and
      Lenci, Alessandro and
      Sakti, Sakriani and
      Xue, Nianwen",
    booktitle = "Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024)",
    month = may,
    year = "2024",
    address = "Torino, Italia",
    publisher = "ELRA and ICCL",
    url = "https://aclanthology.org/2024.lrec-main.1344",
    pages = "15469--15479",
abstract = "Although there have been some works using prompt learning for the Aspect-based Sentiment Analysis(ABSA) tasks, their methods of prompt-tuning are simple and crude. Compared with vanilla fine-tuning methods, prompt learning intuitively bridges the objective form gap between pre-training and fine-tuning. Concretely, simply constructing prompt related to aspect words fails to fully exploit the potential of Pre-trained Language Models, and conducting more robust and professional prompt engineering for downstream tasks is a challenging problem that needs to be solved urgently. Therefore, in this paper, we propose a novel Syntax-aware Enhanced Prompt method (SynPrompt), which sufficiently mines the key syntactic information related to aspect words from the syntactic dependency tree. Additionally, to effectively harness the domain-specific knowledge embedded within PLMs for the ABSA tasks, we construct two adaptive prompt frameworks to enhance the perception ability of the above method. After conducting extensive experiments on three benchmark datasets, we have found that our method consistently achieves favorable results. These findings not only demonstrate the effectiveness and rationality of our proposed methods but also provide a powerful alternative to traditional prompt-tuning.",
}
<?xml version="1.0" encoding="UTF-8"?>
<modsCollection xmlns="http://www.loc.gov/mods/v3">
  <mods ID="yin-etal-2024-synprompt">
    <titleInfo>
      <title>SynPrompt: Syntax-aware Enhanced Prompt Engineering for Aspect-based Sentiment Analysis</title>
    </titleInfo>
    <name type="personal">
      <namePart type="given">Wen</namePart>
      <namePart type="family">Yin</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Cencen</namePart>
      <namePart type="family">Liu</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Yi</namePart>
      <namePart type="family">Xu</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Ahmad</namePart>
      <namePart type="given">Raza</namePart>
      <namePart type="family">Wahla</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Huang</namePart>
      <namePart type="family">Yiting</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <name type="personal">
      <namePart type="given">Dezhang</namePart>
      <namePart type="family">Zheng</namePart>
      <role>
        <roleTerm authority="marcrelator" type="text">author</roleTerm>
      </role>
    </name>
    <originInfo>
      <dateIssued>2024-05</dateIssued>
    </originInfo>
    <typeOfResource>text</typeOfResource>
    <relatedItem type="host">
      <titleInfo>
        <title>Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024)</title>
      </titleInfo>
      <name type="personal">
        <namePart type="given">Nicoletta</namePart>
        <namePart type="family">Calzolari</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Min-Yen</namePart>
        <namePart type="family">Kan</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Veronique</namePart>
        <namePart type="family">Hoste</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Alessandro</namePart>
        <namePart type="family">Lenci</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Sakriani</namePart>
        <namePart type="family">Sakti</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <name type="personal">
        <namePart type="given">Nianwen</namePart>
        <namePart type="family">Xue</namePart>
        <role>
          <roleTerm authority="marcrelator" type="text">editor</roleTerm>
        </role>
      </name>
      <originInfo>
        <publisher>ELRA and ICCL</publisher>
        <place>
          <placeTerm type="text">Torino, Italia</placeTerm>
        </place>
      </originInfo>
      <genre authority="marcgt">conference publication</genre>
    </relatedItem>
    <abstract>Although some works have applied prompt learning to Aspect-based Sentiment Analysis (ABSA) tasks, their prompt-tuning methods remain simplistic. Compared with vanilla fine-tuning, prompt learning intuitively bridges the gap in objective form between pre-training and fine-tuning. Concretely, simply constructing prompts related to aspect words fails to fully exploit the potential of Pre-trained Language Models (PLMs), and designing more robust and principled prompts for downstream tasks is a challenging problem that urgently needs to be solved. Therefore, in this paper, we propose a novel Syntax-aware Enhanced Prompt method (SynPrompt), which mines the key syntactic information related to aspect words from the syntactic dependency tree. Additionally, to effectively harness the domain-specific knowledge embedded within PLMs for ABSA tasks, we construct two adaptive prompt frameworks that enhance the perception ability of the above method. Extensive experiments on three benchmark datasets show that our method consistently achieves favorable results. These findings not only demonstrate the effectiveness and rationality of our proposed methods but also provide a powerful alternative to traditional prompt-tuning.</abstract>
    <identifier type="citekey">yin-etal-2024-synprompt</identifier>
    <location>
      <url>https://aclanthology.org/2024.lrec-main.1344</url>
    </location>
    <part>
      <date>2024-05</date>
      <extent unit="page">
        <start>15469</start>
        <end>15479</end>
      </extent>
    </part>
  </mods>
</modsCollection>
%0 Conference Proceedings
%T SynPrompt: Syntax-aware Enhanced Prompt Engineering for Aspect-based Sentiment Analysis
%A Yin, Wen
%A Liu, Cencen
%A Xu, Yi
%A Wahla, Ahmad Raza
%A Yiting, Huang
%A Zheng, Dezhang
%Y Calzolari, Nicoletta
%Y Kan, Min-Yen
%Y Hoste, Veronique
%Y Lenci, Alessandro
%Y Sakti, Sakriani
%Y Xue, Nianwen
%S Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024)
%D 2024
%8 May
%I ELRA and ICCL
%C Torino, Italia
%F yin-etal-2024-synprompt
%X Although some works have applied prompt learning to Aspect-based Sentiment Analysis (ABSA) tasks, their prompt-tuning methods remain simplistic. Compared with vanilla fine-tuning, prompt learning intuitively bridges the gap in objective form between pre-training and fine-tuning. Concretely, simply constructing prompts related to aspect words fails to fully exploit the potential of Pre-trained Language Models (PLMs), and designing more robust and principled prompts for downstream tasks is a challenging problem that urgently needs to be solved. Therefore, in this paper, we propose a novel Syntax-aware Enhanced Prompt method (SynPrompt), which mines the key syntactic information related to aspect words from the syntactic dependency tree. Additionally, to effectively harness the domain-specific knowledge embedded within PLMs for ABSA tasks, we construct two adaptive prompt frameworks that enhance the perception ability of the above method. Extensive experiments on three benchmark datasets show that our method consistently achieves favorable results. These findings not only demonstrate the effectiveness and rationality of our proposed methods but also provide a powerful alternative to traditional prompt-tuning.
%U https://aclanthology.org/2024.lrec-main.1344
%P 15469-15479
Markdown (Informal)
[SynPrompt: Syntax-aware Enhanced Prompt Engineering for Aspect-based Sentiment Analysis](https://aclanthology.org/2024.lrec-main.1344) (Yin et al., LREC-COLING 2024)
ACL
Wen Yin, Cencen Liu, Yi Xu, Ahmad Raza Wahla, Huang Yiting, and Dezhang Zheng. 2024. SynPrompt: Syntax-aware Enhanced Prompt Engineering for Aspect-based Sentiment Analysis. In Proceedings of the 2024 Joint International Conference on Computational Linguistics, Language Resources and Evaluation (LREC-COLING 2024), pages 15469–15479, Torino, Italia. ELRA and ICCL.
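
As a rough illustration of the approach the abstract describes (mining an aspect term's neighborhood in the syntactic dependency tree and folding it into a cloze-style prompt), here is a minimal sketch. It is not the authors' implementation: the spaCy `en_core_web_sm` parser, the prompt template, and the helper name `build_syntax_prompt` are all assumptions made for the example.

```python
# Minimal sketch of syntax-aware prompt construction for ABSA.
# NOT the SynPrompt code: parser choice, template wording, and the
# helper name below are illustrative assumptions.
import spacy

nlp = spacy.load("en_core_web_sm")  # assumes the model is installed

def build_syntax_prompt(sentence: str, aspect: str) -> str:
    doc = nlp(sentence)
    # Locate the first token belonging to the aspect term.
    matches = [t for t in doc if t.text.lower() in aspect.lower().split()]
    if not matches:
        # Fall back to a plain aspect prompt if the parse misses the term.
        return f"{sentence} The {aspect} is [MASK]."
    head = matches[0]
    # The aspect's syntactic neighborhood: its governor plus its dependents.
    neighbors = ({head.head.text} | {c.text for c in head.children}) - {head.text}
    context = ", ".join(sorted(neighbors))
    # Fold the mined context into a cloze-style sentiment prompt.
    return (f"{sentence} Considering the related words [{context}], "
            f"the sentiment toward {aspect} is [MASK].")

print(build_syntax_prompt("The food was great but the service was slow.", "service"))
```

In a prompt-tuning setup, a PLM would then score the [MASK] slot against a sentiment verbalizer (e.g., mapping great/terrible to positive/negative); the paper's two adaptive prompt frameworks would refine how such syntactic context is selected and injected.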