diff --git a/_news/aaai2025.md b/_news/aaai2025.md
new file mode 100644
index 0000000..3516799
--- /dev/null
+++ b/_news/aaai2025.md
@@ -0,0 +1,7 @@
+---
+title: "One paper accepted at AAAI 2025"
+collection: news
+permalink: /news/aaai-2025
+date: 2024-12-10
+---
+Our paper on sum of squares circuits has been accepted at AAAI 2025.
diff --git a/_publications/loconte2024faster.md b/_publications/loconte2024faster.md
new file mode 100644
index 0000000..d2a4374
--- /dev/null
+++ b/_publications/loconte2024faster.md
@@ -0,0 +1,24 @@
+---
+collection: publications
+ref: "loconte2024faster"
+permalink: "publications/loconte2024faster"
+title: "On Faster Marginalization with Squared Circuits via Orthonormalization"
+date: 2024-12-10 00:00
+tags: circuits probml tensor-networks
+image: "/images/papers/loconte2024faster/mar-squared-circuit.png"
+authors: "Lorenzo Loconte, Antonio Vergari"
+paperurl: "https://arxiv.org/abs/2412.07883"
+pdf: "https://arxiv.org/abs/2412.07883"
+venue: "arXiv 2024"
+excerpt: "Inspired by canonical forms in tensor networks, we derive sufficient conditions ensuring squared circuits are already normalized, and then devise a more efficient marginalization algorithm."
+abstract: "Squared tensor networks (TNs) and their generalization as parameterized computational graphs -- squared circuits -- have recently been used as expressive distribution estimators in high dimensions. However, the squaring operation introduces additional complexity when marginalizing variables or computing the partition function, which hinders their usage in machine learning applications. Canonical forms of popular TNs are parameterized via unitary matrices so as to simplify the computation of particular marginals, but cannot be mapped to general circuits, since these might not correspond to a known TN. Inspired by TN canonical forms, we show how to parameterize squared circuits to ensure they encode already normalized distributions. We then use this parameterization to devise an algorithm to compute any marginal of squared circuits that is more efficient than a previously known one. We conclude by formally showing that the proposed parameterization comes with no expressiveness loss for many circuit classes."
+supplemental:
+bibtex: "@misc{loconte2024faster,&lt;br&gt;
+ title={On Faster Marginalization with Squared Circuits via Orthonormalization},
+ author={Lorenzo Loconte and Antonio Vergari},
+ year={2024},
+ eprint={2412.07883},
+ archivePrefix={arXiv},
+ primaryClass={cs.LG},
+ url={https://arxiv.org/abs/2412.07883}}"
+---
diff --git a/_publications/loconte2024sos.md b/_publications/loconte2024sos.md
index 97e7ea5..3c59886 100644
--- a/_publications/loconte2024sos.md
+++ b/_publications/loconte2024sos.md
@@ -3,22 +3,21 @@ collection: publications
 ref: "loconte2024sos"
 permalink: "publications/loconte2024sos"
 title: "Sum of Squares Circuits"
-date: 2024-08-21 00:00
+date: 2024-12-10 00:00
 tags: circuits probml
 image: "/images/papers/loconte2024sos/sos-hierarchy.png"
 spotlight: "/images/papers/loconte2024sos/sos-spotlight.png"
 authors: "Lorenzo Loconte, Stefan Mengel, Antonio Vergari"
 paperurl: "https://arxiv.org/abs/2408.11778"
 pdf: "https://arxiv.org/abs/2408.11778"
-venue: "arXiv 2024"
+venue: "AAAI 2025"
+code: "https://github.com/april-tools/sos-npcs"
 excerpt: "We theoretically prove an expressiveness limitation of deep subtractive mixture models learned by squaring circuits. To overcome this limitation, we propose sum of squares circuits and build an expressiveness hierarchy around them, allowing us to unify and separate many tractable probabilistic models."
 abstract: "Designing expressive generative models that support exact and efficient inference is a core question in probabilistic ML. Probabilistic circuits (PCs) offer a framework where this tractability-vs-expressiveness trade-off can be analyzed theoretically. Recently, squared PCs encoding subtractive mixtures via negative parameters have emerged as tractable models that can be exponentially more expressive than monotonic PCs, i.e., PCs with positive parameters only. In this paper, we provide a more precise theoretical characterization of the expressiveness relationships among these models. First, we prove that squared PCs can be less expressive than monotonic ones. Second, we formalize a novel class of PCs -- sum of squares PCs -- that can be exponentially more expressive than both squared and monotonic PCs. Around sum of squares PCs, we build an expressiveness hierarchy that allows us to precisely unify and separate different tractable model classes, such as Born Machines and PSD models, as well as other recently introduced tractable probabilistic models that use complex parameters. Finally, we empirically show the effectiveness of sum of squares circuits for distribution estimation."
 supplemental:
-bibtex: "@misc{loconte2024sumsquarescircuits,&lt;br&gt;
+bibtex: "@inproceedings{loconte2024sos,
title={Sum of Squares Circuits},
author={Lorenzo Loconte and Stefan Mengel and Antonio Vergari},
- year={2024},
- eprint={2408.11778},
- archivePrefix={arXiv},
- primaryClass={cs.LG}}"
+ year={2025},&lt;br&gt;
+ booktitle={The 39th Annual AAAI Conference on Artificial Intelligence}}"
 ---
diff --git a/images/papers/loconte2024faster/mar-squared-circuit.png b/images/papers/loconte2024faster/mar-squared-circuit.png
new file mode 100644
index 0000000..ba98192
Binary files /dev/null and b/images/papers/loconte2024faster/mar-squared-circuit.png differ