diff --git a/_news/iclr2024.md b/_news/iclr2024.md
new file mode 100644
index 0000000..d7b65e8
--- /dev/null
+++ b/_news/iclr2024.md
@@ -0,0 +1,7 @@
+---
+title: "One paper accepted at ICLR 2024"
+collection: news
+permalink: /news/iclr-2024
+date: 2024-01-16
+---
+One paper accepted at ICLR 2024 (as a spotlight!) on how to represent and learn deep mixture models that encode subtractions via squaring.
diff --git a/_publications/loconte2023gekcs.md b/_publications/loconte2023gekcs.md
index 9067b4c..c68443d 100644
--- a/_publications/loconte2023gekcs.md
+++ b/_publications/loconte2023gekcs.md
@@ -8,18 +8,18 @@ tags: nesy kge circuits constraints
image: "/images/papers/loconte2023gekcs/gekcs.png"
spotlight: "/images/papers/loconte2023gekcs/gekcs-spotlight.png"
authors: "Lorenzo Loconte, Nicola Di Mauro, Robert Peharz, Antonio Vergari"
-paperurl: "https://arxiv.org/abs/2305.15944"
-pdf: "https://arxiv.org/pdf/2305.15944.pdf"
+paperurl: "https://openreview.net/forum?id=RSGNGiB1q4"
+pdf: "https://openreview.net/pdf?id=RSGNGiB1q4"
venue: "NeurIPS 2023"
award: "oral (top 0.6%)"
code: "https://github.com/april-tools/gekcs"
-excerpt: "KGE models such as CP, RESCAL, TuckER, ComplEx can be re-interpreted as circuits to unlock their generative capabilities, scaling up learning and guaranteeing the satisfaction of logical constraints by design."
+excerpt: "KGE models such as CP, RESCAL, TuckER, ComplEx can be re-interpreted as circuits to unlock their generative capabilities, scaling up inference and learning, and guaranteeing the satisfaction of logical constraints by design."
abstract: "Some of the most successful knowledge graph embedding (KGE) models for link prediction -- CP, RESCAL, TuckER, ComplEx -- can be interpreted as energy-based models. Under this perspective they are not amenable for exact maximum-likelihood estimation (MLE), sampling and struggle to integrate logical constraints. This work re-interprets the score functions of these KGEs as circuits -- constrained computational graphs allowing efficient marginalisation. Then, we design two recipes to obtain efficient generative circuit models by either restricting their activations to be non-negative or squaring their outputs. Our interpretation comes with little or no loss of performance for link prediction, while the circuits framework unlocks exact learning by MLE, efficient sampling of new triples, and guarantee that logical constraints are satisfied by design. Furthermore, our models scale more gracefully than the original KGEs on graphs with millions of entities. "
supplemental:
-bibtex: "@article{loconte2023gekcs,
- title={How to Turn Your Knowledge Graph Embeddings into Generative Models via Probabilistic Circuits},
- author={Loconte, Lorenzo and Di Mauro, Nicola and Peharz, Robert and Vergari, Antonio},
- journal={arXiv preprint arXiv:2305.15944},
- year={2023}
-}"
+bibtex: "@inproceedings{loconte2023how,
+ title={How to Turn Your Knowledge Graph Embeddings into Generative Models},
+ author={Lorenzo Loconte and Nicola Di Mauro and Robert Peharz and Antonio Vergari},
+ booktitle={Thirty-seventh Conference on Neural Information Processing Systems},
+ year={2023},
+ url={https://openreview.net/forum?id=RSGNGiB1q4}}"
---
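
As a worked example of the reinterpretation this entry's excerpt and abstract describe, here is a short LaTeX sketch for the squared-CP case. The notation (triple $(s,r,o)$, embedding symbols, and the factorised normaliser) is mine, assumed from the abstract's "squaring their outputs" recipe rather than taken from the files above:

```latex
\documentclass{article}
\usepackage{amsmath}
\begin{document}
The CP score of a triple $(s, r, o)$, with entity embeddings
$\mathbf{e}_s, \mathbf{e}_o \in \mathbb{R}^d$ and relation embedding
$\mathbf{w}_r \in \mathbb{R}^d$, is
\[
  \phi(s, r, o) = \sum_{k=1}^{d} \mathbf{e}_s[k]\,\mathbf{w}_r[k]\,\mathbf{e}_o[k].
\]
Squaring turns it into an unnormalised probability mass,
$p(s, r, o) \propto \phi(s, r, o)^2$, whose normaliser factorises:
\[
  Z = \sum_{s, r, o} \phi(s, r, o)^2
    = \sum_{k=1}^{d} \sum_{k'=1}^{d}
      \Big( \sum_{s} \mathbf{e}_s[k]\,\mathbf{e}_s[k'] \Big)
      \Big( \sum_{r} \mathbf{w}_r[k]\,\mathbf{w}_r[k'] \Big)
      \Big( \sum_{o} \mathbf{e}_o[k]\,\mathbf{e}_o[k'] \Big),
\]
so computing $Z$ needs only sums over entities and relations
rather than a sum over all triples, which is what makes exact MLE
and sampling tractable at scale.
\end{document}
```
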
diff --git a/_publications/loconte2023subtractive.md b/_publications/loconte2023subtractive.md
index f84898b..ba4def8 100644
--- a/_publications/loconte2023subtractive.md
+++ b/_publications/loconte2023subtractive.md
@@ -3,22 +3,23 @@ collection: publications
ref: "loconte2023subtractive"
permalink: "publications/loconte2023subtractive"
title: "Subtractive Mixture Models via Squaring: Representation and Learning"
-date: 2023-09-30 00:00
+date: 2024-01-16 00:00
tags: circuits probml
image: "/images/papers/loconte2023subtractive/subtractive-ring.png"
spotlight: "/images/papers/loconte2023subtractive/subtractive-spotlight.png"
authors: "Lorenzo Loconte, Aleksanteri M. Sladek, Stefan Mengel, Martin Trapp, Arno Solin, Nicolas Gillis, Antonio Vergari"
-paperurl: "https://arxiv.org/abs/2310.00724"
-pdf: "https://arxiv.org/abs/2310.00724"
-venue: "arXiv 2023"
-code:
+paperurl: "https://openreview.net/forum?id=xIHi5nxu9P"
+pdf: "https://openreview.net/pdf?id=xIHi5nxu9P"
+venue: "ICLR 2024"
+award: "spotlight (top 5%)"
+code: "https://github.com/april-tools/squared-npcs"
excerpt: "We propose to build (deep) subtractive mixture models by squaring circuits. We theoretically prove their expressiveness by deriving an exponential lowerbound on the size of circuits with positive parameters only."
abstract: "Mixture models are traditionally represented and learned by adding several distributions as components. Allowing mixtures to subtract probability mass or density can drastically reduce the number of components needed to model complex distributions. However, learning such subtractive mixtures while ensuring they still encode a non-negative function is challenging. We investigate how to learn and perform inference on deep subtractive mixtures by squaring them. We do this in the framework of probabilistic circuits, which enable us to represent tensorized mixtures and generalize several other subtractive models. We theoretically prove that the class of squared circuits allowing subtractions can be exponentially more expressive than traditional additive mixtures; and, we empirically show this increased expressiveness on a series of real-world distribution estimation tasks."
supplemental:
-bibtex: "@article{loconte2023subtractive,
+bibtex: "@inproceedings{loconte2024subtractive,
title={Subtractive Mixture Models via Squaring: Representation and Learning},
- author={Lorenzo Loconte and Aleksanteri M. Sladek and Stefan Mengel and Martin Trapp and Arno Solin and Nicolas Gillis and Antonio Vergari},
- journal={arXiv preprint arXiv:2310.00724},
- year={2023}
-}"
+ author={Loconte, Lorenzo and Sladek, Aleksanteri M. and Mengel, Stefan and Trapp, Martin and Solin, Arno and Gillis, Nicolas and Vergari, Antonio},
+ booktitle={The Twelfth International Conference on Learning Representations},
+ year={2024},
+ url={https://openreview.net/forum?id=xIHi5nxu9P}}"
---
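
To make the "subtraction via squaring" idea in this entry's excerpt concrete, here is a minimal numerical sketch in Python. It is not taken from the linked april-tools/squared-npcs code; the two-Gaussian mixture and the trapezoidal normalisation are illustrative assumptions standing in for the exact circuit normaliser the paper derives:

```python
import numpy as np
from scipy.integrate import trapezoid
from scipy.stats import norm

# A shallow subtractive mixture: subtract a narrow Gaussian from a wide one.
# c(x) itself may go negative, but c(x)^2 is non-negative by construction.
w1, w2 = 1.0, 0.6

def c(x):
    return w1 * norm.pdf(x, 0.0, 1.0) - w2 * norm.pdf(x, 0.0, 0.5)

# Renormalise the square on a grid (the paper computes this normaliser
# exactly for circuits; numerical integration stands in for it here).
xs = np.linspace(-8.0, 8.0, 10_001)
Z = trapezoid(c(xs) ** 2, xs)

def density(x):
    return c(x) ** 2 / Z

assert np.all(density(xs) >= 0.0)   # non-negative everywhere
print(trapezoid(density(xs), xs))   # ~1.0, i.e. a valid density
```

Squaring the difference carves low-density regions out of just two components centred at the same mean, the kind of shape the excerpt argues would take many more components in a purely additive mixture.
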