Here are some resources about Retrieval-Augmented Generation (RAG) with LLMs
tag: VisRAG
| Tsinghua University
paper link: here
github link: here
citation:
@misc{yu2024visragvisionbasedretrievalaugmentedgeneration,
  title={{VisRAG}: Vision-based Retrieval-augmented Generation on Multi-modality Documents},
  author={Shi Yu and Chaoyue Tang and Bokai Xu and Junbo Cui and Junhao Ran and Yukun Yan and Zhenghao Liu and Shuo Wang and Xu Han and Zhiyuan Liu and Maosong Sun},
  year={2024},
  eprint={2410.10594},
  archivePrefix={arXiv},
  primaryClass={cs.IR},
  url={https://arxiv.org/abs/2410.10594},
}
tag: LightRAG
| HKU
paper link: here
github link: here
citation:
@misc{guo2024lightragsimplefastretrievalaugmented,
  title={{LightRAG}: Simple and Fast Retrieval-Augmented Generation},
  author={Zirui Guo and Lianghao Xia and Yanhua Yu and Tu Ao and Chao Huang},
  year={2024},
  eprint={2410.05779},
  archivePrefix={arXiv},
  primaryClass={cs.IR},
  url={https://arxiv.org/abs/2410.05779},
}
tag: MemoRAG
| BAAI
paper link: here
github link: here
citation:
@misc{qian2024memoragmovingnextgenrag,
  title={{MemoRAG}: Moving towards Next-Gen {RAG} Via Memory-Inspired Knowledge Discovery},
  author={Hongjin Qian and Peitian Zhang and Zheng Liu and Kelong Mao and Zhicheng Dou},
  year={2024},
  eprint={2409.05591},
  archivePrefix={arXiv},
  primaryClass={cs.CL},
  url={https://arxiv.org/abs/2409.05591},
}
tag: RAGLAB
| EMNLP24
| Nanjing University
paper link: here
github link: here
citation:
@misc{zhang2024raglabmodularresearchorientedunified,
  title={{RAGLAB}: A Modular and Research-Oriented Unified Framework for Retrieval-Augmented Generation},
  author={Xuanwang Zhang and Yunze Song and Yidong Wang and Shuyun Tang and Xinfeng Li and Zhengran Zeng and Zhen Wu and Wei Ye and Wenyuan Xu and Yue Zhang and Xinyu Dai and Shikun Zhang and Qingsong Wen},
  year={2024},
  eprint={2408.11381},
  archivePrefix={arXiv},
  primaryClass={cs.CL},
  url={https://arxiv.org/abs/2408.11381},
}
tag: LongRAG
| University of Waterloo
paper link: here
github link: here
homepage link: here
citation:
@misc{jiang2024longragenhancingretrievalaugmentedgeneration,
  title={{LongRAG}: Enhancing Retrieval-Augmented Generation with Long-context {LLMs}},
  author={Ziyan Jiang and Xueguang Ma and Wenhu Chen},
  year={2024},
  eprint={2406.15319},
  archivePrefix={arXiv},
  primaryClass={cs.CL},
  url={https://arxiv.org/abs/2406.15319},
}
tag: GraphRAG
| Microsoft
paper link: here
blog link: here
github link: here
doc link: here
homepage link: here
citation:
@misc{edge2024localglobalgraphrag,
  title={From Local to Global: A Graph {RAG} Approach to Query-Focused Summarization},
  author={Darren Edge and Ha Trinh and Newman Cheng and Joshua Bradley and Alex Chao and Apurva Mody and Steven Truitt and Jonathan Larson},
  year={2024},
  eprint={2404.16130},
  archivePrefix={arXiv},
  primaryClass={cs.CL},
  url={https://arxiv.org/abs/2404.16130},
}
tag: RQ-RAG
| HKU
paper link: here
github link: here
citation:
@misc{chan2024rqraglearningrefinequeries,
  title={{RQ-RAG}: Learning to Refine Queries for Retrieval Augmented Generation},
  author={Chi-Min Chan and Chunpu Xu and Ruibin Yuan and Hongyin Luo and Wei Xue and Yike Guo and Jie Fu},
  year={2024},
  eprint={2404.00610},
  archivePrefix={arXiv},
  primaryClass={cs.CL},
  url={https://arxiv.org/abs/2404.00610},
}
tag: Self-RAG
| Allen AI
paper link: here
github link: here
homepage link: here
citation:
@misc{asai2023selfraglearningretrievegenerate,
  title={{Self-RAG}: Learning to Retrieve, Generate, and Critique through Self-Reflection},
  author={Akari Asai and Zeqiu Wu and Yizhong Wang and Avirup Sil and Hannaneh Hajishirzi},
  year={2023},
  eprint={2310.11511},
  archivePrefix={arXiv},
  primaryClass={cs.CL},
  url={https://arxiv.org/abs/2310.11511},
}
tag: HyDE
| CMU
paper link: here
github link: here
citation:
@misc{gao2022precise,
  title={Precise Zero-Shot Dense Retrieval without Relevance Labels},
  author={Luyu Gao and Xueguang Ma and Jimmy Lin and Jamie Callan},
  year={2022},
  eprint={2212.10496},
  archivePrefix={arXiv},
  primaryClass={cs.IR},
  url={https://arxiv.org/abs/2212.10496},
}
tag: GenRead
| ICLR23
| Microsoft
paper link: here
github link: here
citation:
@misc{yu2022generate,
  title={Generate rather than retrieve: Large language models are strong context generators},
  author={Yu, Wenhao and Iter, Dan and Wang, Shuohang and Xu, Yichong and Ju, Mingxuan and Sanyal, Soumya and Zhu, Chenguang and Zeng, Michael and Jiang, Meng},
  year={2022},
  eprint={2209.10063},
  archivePrefix={arXiv},
  primaryClass={cs.CL},
  url={https://arxiv.org/abs/2209.10063},
}
tag: RAG
| NIPS20
| Meta
paper link: here
citation:
@article{lewis2020retrieval,
  title={Retrieval-augmented generation for knowledge-intensive {NLP} tasks},
  author={Lewis, Patrick and Perez, Ethan and Piktus, Aleksandra and Petroni, Fabio and Karpukhin, Vladimir and Goyal, Naman and K{\"u}ttler, Heinrich and Lewis, Mike and Yih, Wen-tau and Rockt{\"a}schel, Tim and others},
  journal={Advances in Neural Information Processing Systems},
  volume={33},
  pages={9459--9474},
  year={2020}
}
tag: RGB
| ISCAS
paper link: here
github link: here
citation:
@misc{chen2023benchmarking,
  title={Benchmarking Large Language Models in Retrieval-Augmented Generation},
  author={Jiawei Chen and Hongyu Lin and Xianpei Han and Le Sun},
  year={2023},
  eprint={2309.01431},
  archivePrefix={arXiv},
  primaryClass={cs.CL},
  url={https://arxiv.org/abs/2309.01431},
}
tag: GraphRAG Survey
| Peking University
paper link: here
github link: here
citation:
@misc{peng2024graphretrievalaugmentedgenerationsurvey,
  title         = {Graph Retrieval-Augmented Generation: A Survey},
  author        = {Boci Peng and Yun Zhu and Yongchao Liu and Xiaohe Bo and Haizhou Shi and Chuntao Hong and Yan Zhang and Siliang Tang},
  year          = {2024},
  eprint        = {2408.08921},
  archivePrefix = {arXiv},
  primaryClass  = {cs.AI},
  url           = {https://arxiv.org/abs/2408.08921},
}
tag: RAG Survey
| Tongji University
paper link: here
github link: here
citation:
@misc{gao2024retrievalaugmented,
  title={Retrieval-Augmented Generation for Large Language Models: A Survey},
  author={Yunfan Gao and Yun Xiong and Xinyu Gao and Kangxiang Jia and Jinliu Pan and Yuxi Bi and Yi Dai and Jiawei Sun and Qianyu Guo and Meng Wang and Haofen Wang},
  year={2024},
  eprint={2312.10997},
  archivePrefix={arXiv},
  primaryClass={cs.CL},
  url={https://arxiv.org/abs/2312.10997},
}