diff --git a/_news/locolm-accepted-r2fm.md b/_news/locolm-accepted-r2fm.md
new file mode 100644
index 0000000..5e2fc29
--- /dev/null
+++ b/_news/locolm-accepted-r2fm.md
@@ -0,0 +1,7 @@
+---
+title: "Loco LM @ R2FM"
+collection: news
+permalink: /news/loco-lm-r2fm
+date: 2024-03-06
+---
+How can LLMs be made more logically consistent? Check out our work at the ICLR 2024 Workshop on Reliable and Responsible Foundation Models.
\ No newline at end of file
diff --git a/_publications/calanzone2024locolm.md b/_publications/calanzone2024locolm.md
new file mode 100644
index 0000000..239d9c5
--- /dev/null
+++ b/_publications/calanzone2024locolm.md
@@ -0,0 +1,21 @@
+---
+collection: publications
+ref: "calanzone2024locolm"
+permalink: "publications/calanzone2024locolm"
+title: "Galerkin meets Laplace: Fast uncertainty estimation in neural PDEs"
+date: 2024-03-05 10:00
+tags: nesy probml llm
+image: "/images/papers/calanzone2024locolm/locolm.png"
+authors: "Diego Calanzone, Antonio Vergari, Stefano Teso"
+paperurl: "https://openreview.net/forum?id=q3SGbfj19d"
+pdf: "https://openreview.net/pdf?id=q3SGbfj19d"
+venue: "R2FM Workshop @ ICLR 2024"
+excerpt: "We introduce a training objective based on principled probabilistic reasoning that teaches an LLM to be logically consistent with a set of external facts and rules, allowing it to extrapolate to unseen but semantically similar factual knowledge."
+abstract: "Large language models (LLMs) are a promising venue for natural language understanding and generation tasks. However, current LLMs are far from reliable: they are prone to generate non-factual information and, more crucially, to contradict themselves when prompted to reason about beliefs of the world. These problems are currently addressed with large scale fine-tuning or by delegating consistent reasoning to external tools. In this work, we strive for a middle ground and introduce a training objective based on principled probabilistic reasoning that teaches an LLM to be consistent with external knowledge in the form of a set of facts and rules. Fine-tuning with our loss on a limited set of facts enables our LLMs to be more logically consistent than previous baselines and allows them to extrapolate to unseen but semantically similar factual knowledge more systematically."
+bibtex: "@inproceedings{calanzone2024locolm,
+ title={Towards Logically Consistent Language Models via Probabilistic Reasoning},
+ author={Diego Calanzone and Antonio Vergari and Stefano Teso},
+ booktitle={ICLR 2024 Workshop on Reliable and Responsible Foundation Models},
+ year={2024}
+}"
+---
diff --git a/images/papers/calanzone2024locolm/locolm.png b/images/papers/calanzone2024locolm/locolm.png
new file mode 100644
index 0000000..79c9401
Binary files /dev/null and b/images/papers/calanzone2024locolm/locolm.png differ