From e0b422b181c1e869e1e9d2444ac771ac71f18414 Mon Sep 17 00:00:00 2001 From: aver Date: Sun, 14 Jan 2024 16:31:40 +0000 Subject: [PATCH] grivas2023sigbot accepted at aaai24 --- _news/sigbot-accepted-aaai24.md | 7 +++++++ _publications/grivas2023sigbot.md | 8 ++++---- 2 files changed, 11 insertions(+), 4 deletions(-) create mode 100644 _news/sigbot-accepted-aaai24.md diff --git a/_news/sigbot-accepted-aaai24.md b/_news/sigbot-accepted-aaai24.md new file mode 100644 index 0000000..7a96d36 --- /dev/null +++ b/_news/sigbot-accepted-aaai24.md @@ -0,0 +1,7 @@ +--- +title: "Taming the Sigmoid Bottleneck" +collection: news +permalink: /news/accepted-taming-sigbot-preprint +date: 2023-12-09 +--- +One work on how to verify and fix bottlenecked sigmoid layers was accepted at AAAI 2024. \ No newline at end of file diff --git a/_publications/grivas2023sigbot.md b/_publications/grivas2023sigbot.md index b21158b..9031a97 100644 --- a/_publications/grivas2023sigbot.md +++ b/_publications/grivas2023sigbot.md @@ -10,15 +10,15 @@ spotlight: "/images/papers/grivas2023sigbot/sigbot-spotlight.png" authors: "Andreas Grivas, Antonio Vergari, Adam Lopez" paperurl: "https://arxiv.org/abs/2310.10443" pdf: "https://arxiv.org/pdf/2310.10443.pdf" -venue: "arXiv 2023" +venue: "AAAI 2024" code: "https://github.com/andreasgrv/sigmoid-bottleneck" excerpt: "We highlight a weakness of low-rank linear multi-label classifiers: they can have meaningful outputs that cannot be predicted. We design a classifier which guarantees that sparse outputs can be predicted while using less trainable parameters." abstract: "Sigmoid output layers are widely used in multi-label classification (MLC) tasks, in which multiple labels can be assigned to any input. In many practical MLC tasks, the number of possible labels is in the thousands, often exceeding the number of input features and resulting in a low-rank output layer. 
In multi-class classification, it is known that such a low-rank output layer is a bottleneck that can result in unargmaxable classes: classes which cannot be predicted for any input. In this paper, we show that for MLC tasks, the analogous sigmoid bottleneck results in exponentially many unargmaxable label combinations. We explain how to detect these unargmaxable outputs and demonstrate their presence in three widely used MLC datasets. We then show that they can be prevented in practice by introducing a Discrete Fourier Transform (DFT) output layer, which guarantees that all sparse label combinations with up to k active labels are argmaxable. Our DFT layer trains faster and is more parameter efficient, matching the F1@k score of a sigmoid layer while using up to 50% fewer trainable parameters. Our code is publicly available at https://github.com/andreasgrv/sigmoid-bottleneck." supplemental: -bibtex: "@article{grivas2023taming,
+bibtex: "@inproceedings{grivas2023taming,
title={Taming the Sigmoid Bottleneck: Provably Argmaxable Sparse Multi-Label Classification},
author={Andreas Grivas and Antonio Vergari and Adam Lopez},
- journal={arXiv preprint arXiv:2310.10443},
- year={2023} + booktitle={Proceedings of the 38th Annual AAAI Conference on Artificial Intelligence},
+ year={2024} }" ---