diff --git a/_news/neurips2024.md b/_news/neurips2024.md
new file mode 100644
index 0000000..42911f4
--- /dev/null
+++ b/_news/neurips2024.md
@@ -0,0 +1,7 @@
+---
+title: "Two papers accepted at NeurIPS 2024"
+collection: news
+permalink: /news/neurips-2024
+date: 2024-09-26
+---
+Our paper on how to scale probabilistic integral circuits has been accepted at NeurIPS 2024 as a spotlight (!), and rsbench as a poster in the Datasets and Benchmarks track.
diff --git a/_publications/bortolotti2024rsbench.md b/_publications/bortolotti2024rsbench.md
index 05cc5d2..4af9cd7 100644
--- a/_publications/bortolotti2024rsbench.md
+++ b/_publications/bortolotti2024rsbench.md
@@ -3,21 +3,21 @@
 collection: publications
 ref: "bortolotti2024rsbench"
 permalink: "publications/bortolotti2024rsbench"
 title: "A Benchmark Suite for Systematically Evaluating Reasoning Shortcuts"
-date: 2024-06-14 00:00
+date: 2024-09-26 00:00
 tags: nesy shortcuts reasoning
 image: "/images/papers/bortolotti2024rsbench/rsbench.png"
 authors: "Samuele Bortolotti, Emanuele Marconato, Tommaso Carraro, Paolo Morettin, Emile van Krieken, Antonio Vergari, Stefano Teso, Andrea Passerini"
 paperurl: "https://unitn-sml.github.io/rsbench/"
 pdf: "https://arxiv.org/pdf/2406.10368"
-venue: "arXiv 2024"
+venue: "NeurIPS 2024 Datasets & Benchmarks track"
 code: "https://github.com/unitn-sml/rsbench-code"
 excerpt: "How to evaluate whether neuro-symbolic systems are learning the right concepts or falling prey to reasoning shortcuts?"
 abstract: "The advent of powerful neural classifiers has increased interest in problems that require both learning and reasoning. These problems are critical for understanding important properties of models, such as trustworthiness, generalization, interpretability, and compliance to safety and structural constraints. However, recent research observed that tasks requiring both learning and reasoning on background knowledge often suffer from reasoning shortcuts (RSs): predictors can solve the downstream reasoning task without associating the correct concepts to the high-dimensional data. To address this issue, we introduce rsbench, a comprehensive benchmark suite designed to systematically evaluate the impact of RSs on models by providing easy access to highly customizable tasks affected by RSs. Furthermore, rsbench implements common metrics for evaluating concept quality and introduces novel formal verification procedures for assessing the presence of RSs in learning tasks. Using rsbench, we highlight that obtaining high quality concepts in both purely neural and neuro-symbolic models is a far-from-solved problem."
 supplemental:
-bibtex: "@article{bortolotti2024benchmark,
+bibtex: "@inproceedings{bortolotti2024benchmark,
   title={A Benchmark Suite for Systematically Evaluating Reasoning Shortcuts},
   author={Bortolotti, Samuele and Marconato, Emanuele and Carraro, Tommaso and Morettin, Paolo and van Krieken, Emile and Vergari, Antonio and Teso, Stefano and Passerini, Andrea},
-  journal={arXiv preprint arXiv:2406.10368},
+  booktitle={NeurIPS 2024 Datasets & Benchmarks track},
   year={2024}
 }"
 ---
diff --git a/_publications/gala2024-tenpic.md b/_publications/gala2024-tenpic.md
index 2e3f98a..99a78e3 100644
--- a/_publications/gala2024-tenpic.md
+++ b/_publications/gala2024-tenpic.md
@@ -3,20 +3,21 @@
 collection: publications
 ref: "gala2024tenpics"
 permalink: "publications/gala2024tenpics"
 title: "Scaling Continuous Latent Variable Models as Probabilistic Integral Circuits"
-date: 2024-07-01 00:00
+date: 2024-09-26 00:00
 tags: circuits probml
 image: "/images/papers/gala2024tenpics/tenpics.png"
-authors: "Gennaro Gala, Cassio de Campos, Antonio Vergari, Erik Quaeghebeur"
+authors: "Gennaro Gala, Cassio de Campos, Antonio Vergari*, Erik Quaeghebeur*"
 paperurl: "https://arxiv.org/abs/2406.06494"
 pdf: "https://arxiv.org/pdf/2406.06494"
-venue: "arXiv 2024"
+venue: "NeurIPS 2024"
+award: "spotlight"
 excerpt: "We scale continuous latent variable mixtures and adapt them to have intricate dependencies, obtaining state-of-the-art likelihoods for tractable models"
 abstract: "Probabilistic integral circuits (PICs) have been recently introduced as probabilistic models enjoying the key ingredient behind expressive generative models: continuous latent variables (LVs). PICs are symbolic computational graphs defining continuous LV models as hierarchies of functions that are summed and multiplied together, or integrated over some LVs. They are tractable if LVs can be analytically integrated out, otherwise they can be approximated by tractable probabilistic circuits (PC) encoding a hierarchical numerical quadrature process, called QPCs. So far, only tree-shaped PICs have been explored, and training them via numerical quadrature requires memory-intensive processing at scale. In this paper, we address these issues, and present: (i) a pipeline for building DAG-shaped PICs out of arbitrary variable decompositions, (ii) a procedure for training PICs using tensorized circuit architectures, and (iii) neural functional sharing techniques to allow scalable training. In extensive experiments, we showcase the effectiveness of functional sharing and the superiority of QPCs over traditional PCs."
 supplemental:
-bibtex: "@article{gala2024scaling,
+bibtex: "@inproceedings{gala2024scaling,
   title={Scaling Continuous Latent Variable Models as Probabilistic Integral Circuits},
   author={Gala, Gennaro and de Campos, Cassio and Vergari, Antonio and Quaeghebeur, Erik},
-  journal={arXiv preprint arXiv:2406.06494},
+  booktitle={NeurIPS},
   year={2024}
 }"
 ---
\ No newline at end of file