Skip to content

Commit 42fc975

Browse files
authored
Update README.md
1 parent d19ce55 commit 42fc975

File tree

1 file changed

+26
-16
lines changed

1 file changed

+26
-16
lines changed

README.md

Lines changed: 26 additions & 16 deletions
Original file line number | Diff line number | Diff line change
@@ -129,21 +129,31 @@ CUDA_VISIBLE_DEVICES=3 python run_multiple_choice.py \
129129

130130
If the paper and code are helpful, please kindly cite our paper:
131131
```
132-
@inproceedings{Bao24amrlda,
133-
author = {Qiming Bao and
134-
Alex Yuxuan Peng and
135-
Zhenyun Deng and
136-
Wanjun Zhong and
137-
Gaël Gendron and
138-
Neşet Tan and
139-
Nathan Young and
140-
Yang Chen and
141-
Yonghua Zhu and
142-
Michael Witbrock and
143-
Jiamou Liu},
144-
title = {Abstract Meaning Representation-Based Logic-Driven Data Augmentation for Logical Reasoning},
145-
booktitle = {Findings of ACL},
146-
publisher = {{ACL}},
147-
year = {2024}
132+
@inproceedings{bao-etal-2024-abstract,
133+
title = "{A}bstract {M}eaning {R}epresentation-Based Logic-Driven Data Augmentation for Logical Reasoning",
134+
author = "Bao, Qiming and
135+
Peng, Alex and
136+
Deng, Zhenyun and
137+
Zhong, Wanjun and
138+
Gendron, Gael and
139+
Pistotti, Timothy and
140+
Tan, Neset and
141+
Young, Nathan and
142+
Chen, Yang and
143+
Zhu, Yonghua and
144+
Denny, Paul and
145+
Witbrock, Michael and
146+
Liu, Jiamou",
147+
editor = "Ku, Lun-Wei and
148+
Martins, Andre and
149+
Srikumar, Vivek",
150+
booktitle = "Findings of the Association for Computational Linguistics ACL 2024",
151+
month = aug,
152+
year = "2024",
153+
address = "Bangkok, Thailand and virtual meeting",
154+
publisher = "Association for Computational Linguistics",
155+
url = "https://aclanthology.org/2024.findings-acl.353",
156+
pages = "5914--5934",
157+
abstract = "Combining large language models with logical reasoning enhances their capacity to address problems in a robust and reliable manner. Nevertheless, the intricate nature of logical reasoning poses challenges when gathering reliable data from the web to build comprehensive training datasets, subsequently affecting performance on downstream tasks. To address this, we introduce a novel logic-driven data augmentation approach, AMR-LDA. AMR-LDA converts the original text into an Abstract Meaning Representation (AMR) graph, a structured semantic representation that encapsulates the logical structure of the sentence, upon which operations are performed to generate logically modified AMR graphs. The modified AMR graphs are subsequently converted back into text to create augmented data. Notably, our methodology is architecture-agnostic and enhances both generative large language models, such as GPT-3.5 and GPT-4, through prompt augmentation, and discriminative large language models through contrastive learning with logic-driven data augmentation. Empirical evidence underscores the efficacy of our proposed method with improvement in performance across seven downstream tasks, such as reading comprehension requiring logical reasoning, textual entailment, and natural language inference. Furthermore, our method leads on the ReClor leaderboard. The source code and data are publicly available",
148158
}
149159
```

0 commit comments

Comments (0)