From 3827cec17fea183c1beaa9e9102ebd1d0bdfb2a7 Mon Sep 17 00:00:00 2001 From: Crazyang Date: Tue, 25 Oct 2022 15:23:35 +0800 Subject: [PATCH] resort --- .../AutoDecoder (AD)}/Auto-Decoder.md | 0 .../AutoDecoder (AD)}/README.md | 0 1-Variational AutoEncoder (VAE)/README.md | 33 +++ ...67\344\275\223\345\256\236\347\216\260.md" | 0 ...40\351\200\237\347\256\227\346\263\225.md" | 0 ...30\347\247\215\344\273\213\347\273\215.md" | 0 ...06\350\256\272\346\216\250\345\257\274.md" | 0 .../README.md | 0 .../README.md | 0 .../Normalizing flow.md | 0 {3-Nomalizing Flow => 4-Flow}/Notes.md | 0 {3-Nomalizing Flow => 4-Flow}/README.md | 0 .../Notes.md | 0 .../README.md | 0 .../README.md | 0 {CLIP => 7-Text-to-Image/CLIP}/CLIP.png | Bin .../CLIP}/README-Notes.md | 0 {CLIP => 7-Text-to-Image/CLIP}/test.py | 0 .../Notes.md | 0 .../README.md | 0 .../KL\346\225\243\345\272\246.md" | 0 .../0-Evaluation/Perceptual Loss.md | 0 .../0-Evaluation/REAMDME.md | 0 .../0-Evaluation/loss.md | 0 .../0-Evaluation/perceptual_loss.py | 0 .../\350\267\235\347\246\273 Distance.md" | 0 .../Loss function/Distance.md | 0 .../Loss function/README.md | 0 .../README.md | 0 .../reg summary.md | 0 {14-Animation => Others/Animation}/README.md | 0 README.md | 217 +++--------------- "\347\273\223\346\236\204.md" | 22 ++ 33 files changed, 84 insertions(+), 188 deletions(-) rename {6-AutoDecoder (AD) => 1-Variational AutoEncoder (VAE)/AutoDecoder (AD)}/Auto-Decoder.md (100%) rename {6-AutoDecoder (AD) => 1-Variational AutoEncoder (VAE)/AutoDecoder (AD)}/README.md (100%) rename "5-Diffusion Model/Learning Material/\345\205\267\344\275\223\345\256\236\347\216\260.md" => "2-Diffusion Model/Learning Material/\345\205\267\344\275\223\345\256\236\347\216\260.md" (100%) rename "5-Diffusion Model/Learning Material/\345\212\240\351\200\237\347\256\227\346\263\225.md" => "2-Diffusion Model/Learning Material/\345\212\240\351\200\237\347\256\227\346\263\225.md" (100%) rename "5-Diffusion Model/Learning 
Material/\345\217\230\347\247\215\344\273\213\347\273\215.md" => "2-Diffusion Model/Learning Material/\345\217\230\347\247\215\344\273\213\347\273\215.md" (100%) rename "5-Diffusion Model/Learning Material/\346\211\251\346\225\243\346\250\241\345\236\213 \347\220\206\350\256\272\346\216\250\345\257\274.md" => "2-Diffusion Model/Learning Material/\346\211\251\346\225\243\346\250\241\345\236\213 \347\220\206\350\256\272\346\216\250\345\257\274.md" (100%) rename {5-Diffusion Model => 2-Diffusion Model}/README.md (100%) rename {2-Energy-Based Models (EBM) => 3-Energy-Based Model (EBM)}/README.md (100%) rename {3-Nomalizing Flow => 4-Flow}/Normalizing flow.md (100%) rename {3-Nomalizing Flow => 4-Flow}/Notes.md (100%) rename {3-Nomalizing Flow => 4-Flow}/README.md (100%) rename {11-Representation Learning => 5-Representation Learning}/Notes.md (100%) rename {11-Representation Learning => 5-Representation Learning}/README.md (100%) rename {12-Disentangled Representations => 6-Disentangled Representation}/README.md (100%) rename {CLIP => 7-Text-to-Image/CLIP}/CLIP.png (100%) rename {CLIP => 7-Text-to-Image/CLIP}/README-Notes.md (100%) rename {CLIP => 7-Text-to-Image/CLIP}/test.py (100%) rename {13-Text-to-Image => 7-Text-to-Image}/Notes.md (100%) rename {13-Text-to-Image => 7-Text-to-Image}/README.md (100%) rename "Evaluation & Loss/0-Evaluation/KL\346\225\243\345\272\246.md" => "8-Evaluation & Loss/0-Evaluation/KL\346\225\243\345\272\246.md" (100%) rename {Evaluation & Loss => 8-Evaluation & Loss}/0-Evaluation/Perceptual Loss.md (100%) rename {Evaluation & Loss => 8-Evaluation & Loss}/0-Evaluation/REAMDME.md (100%) rename {Evaluation & Loss => 8-Evaluation & Loss}/0-Evaluation/loss.md (100%) rename {Evaluation & Loss => 8-Evaluation & Loss}/0-Evaluation/perceptual_loss.py (100%) rename "Evaluation & Loss/0-Evaluation/\350\267\235\347\246\273 Distance.md" => "8-Evaluation & Loss/0-Evaluation/\350\267\235\347\246\273 Distance.md" (100%) rename {Evaluation & Loss => 
8-Evaluation & Loss}/Loss function/Distance.md (100%) rename {Evaluation & Loss => 8-Evaluation & Loss}/Loss function/README.md (100%) rename {Evaluation & Loss => 8-Evaluation & Loss}/README.md (100%) rename {Evaluation & Loss => 8-Evaluation & Loss}/reg summary.md (100%) rename {14-Animation => Others/Animation}/README.md (100%) diff --git a/6-AutoDecoder (AD)/Auto-Decoder.md b/1-Variational AutoEncoder (VAE)/AutoDecoder (AD)/Auto-Decoder.md similarity index 100% rename from 6-AutoDecoder (AD)/Auto-Decoder.md rename to 1-Variational AutoEncoder (VAE)/AutoDecoder (AD)/Auto-Decoder.md diff --git a/6-AutoDecoder (AD)/README.md b/1-Variational AutoEncoder (VAE)/AutoDecoder (AD)/README.md similarity index 100% rename from 6-AutoDecoder (AD)/README.md rename to 1-Variational AutoEncoder (VAE)/AutoDecoder (AD)/README.md diff --git a/1-Variational AutoEncoder (VAE)/README.md b/1-Variational AutoEncoder (VAE)/README.md index da90818..39c8c87 100644 --- a/1-Variational AutoEncoder (VAE)/README.md +++ b/1-Variational AutoEncoder (VAE)/README.md @@ -1,5 +1,29 @@ # Variational Auto-Encoder (VAE) +The majority of the research efforts on improving VAEs are dedicated to the statistical challenges, such as: + +- reducing the gap between approximate and true posterior distribution +- formulating tighter bounds +- reducing the gradient noise +- extending VAEs to discrete variables +- tackling posterior collapse +- designing special network architectures + - previous work just borrows the architectures from the classification tasks + + + +VAEs maximize the mutual information between the input and latent variables, requiring the networks to retain the information content of the input data as much as possible. 
+ +Information maximization in noisy channels: A variational approach +**[`NeurIPS 2017`]** + +Deep variational information bottleneck +**[`ICLR 2017`]** + + + + + 学习资料 https://jaan.io/what-is-variational-autoencoder-vae-tutorial/ @@ -78,3 +102,12 @@ VQVAE通过Encoder学习出中间编码,然后通过最邻近搜索将中间 【这样其实实现的是一种压缩的效果】 + + + + +参考: + +https://www.jeremyjordan.me/variational-autoencoders/ + +https://www.jeremyjordan.me/autoencoders/ diff --git "a/5-Diffusion Model/Learning Material/\345\205\267\344\275\223\345\256\236\347\216\260.md" "b/2-Diffusion Model/Learning Material/\345\205\267\344\275\223\345\256\236\347\216\260.md" similarity index 100% rename from "5-Diffusion Model/Learning Material/\345\205\267\344\275\223\345\256\236\347\216\260.md" rename to "2-Diffusion Model/Learning Material/\345\205\267\344\275\223\345\256\236\347\216\260.md" diff --git "a/5-Diffusion Model/Learning Material/\345\212\240\351\200\237\347\256\227\346\263\225.md" "b/2-Diffusion Model/Learning Material/\345\212\240\351\200\237\347\256\227\346\263\225.md" similarity index 100% rename from "5-Diffusion Model/Learning Material/\345\212\240\351\200\237\347\256\227\346\263\225.md" rename to "2-Diffusion Model/Learning Material/\345\212\240\351\200\237\347\256\227\346\263\225.md" diff --git "a/5-Diffusion Model/Learning Material/\345\217\230\347\247\215\344\273\213\347\273\215.md" "b/2-Diffusion Model/Learning Material/\345\217\230\347\247\215\344\273\213\347\273\215.md" similarity index 100% rename from "5-Diffusion Model/Learning Material/\345\217\230\347\247\215\344\273\213\347\273\215.md" rename to "2-Diffusion Model/Learning Material/\345\217\230\347\247\215\344\273\213\347\273\215.md" diff --git "a/5-Diffusion Model/Learning Material/\346\211\251\346\225\243\346\250\241\345\236\213 \347\220\206\350\256\272\346\216\250\345\257\274.md" "b/2-Diffusion Model/Learning Material/\346\211\251\346\225\243\346\250\241\345\236\213 \347\220\206\350\256\272\346\216\250\345\257\274.md" similarity index 100% rename 
from "5-Diffusion Model/Learning Material/\346\211\251\346\225\243\346\250\241\345\236\213 \347\220\206\350\256\272\346\216\250\345\257\274.md" rename to "2-Diffusion Model/Learning Material/\346\211\251\346\225\243\346\250\241\345\236\213 \347\220\206\350\256\272\346\216\250\345\257\274.md" diff --git a/5-Diffusion Model/README.md b/2-Diffusion Model/README.md similarity index 100% rename from 5-Diffusion Model/README.md rename to 2-Diffusion Model/README.md diff --git a/2-Energy-Based Models (EBM)/README.md b/3-Energy-Based Model (EBM)/README.md similarity index 100% rename from 2-Energy-Based Models (EBM)/README.md rename to 3-Energy-Based Model (EBM)/README.md diff --git a/3-Nomalizing Flow/Normalizing flow.md b/4-Flow/Normalizing flow.md similarity index 100% rename from 3-Nomalizing Flow/Normalizing flow.md rename to 4-Flow/Normalizing flow.md diff --git a/3-Nomalizing Flow/Notes.md b/4-Flow/Notes.md similarity index 100% rename from 3-Nomalizing Flow/Notes.md rename to 4-Flow/Notes.md diff --git a/3-Nomalizing Flow/README.md b/4-Flow/README.md similarity index 100% rename from 3-Nomalizing Flow/README.md rename to 4-Flow/README.md diff --git a/11-Representation Learning/Notes.md b/5-Representation Learning/Notes.md similarity index 100% rename from 11-Representation Learning/Notes.md rename to 5-Representation Learning/Notes.md diff --git a/11-Representation Learning/README.md b/5-Representation Learning/README.md similarity index 100% rename from 11-Representation Learning/README.md rename to 5-Representation Learning/README.md diff --git a/12-Disentangled Representations/README.md b/6-Disentangled Representation/README.md similarity index 100% rename from 12-Disentangled Representations/README.md rename to 6-Disentangled Representation/README.md diff --git a/CLIP/CLIP.png b/7-Text-to-Image/CLIP/CLIP.png similarity index 100% rename from CLIP/CLIP.png rename to 7-Text-to-Image/CLIP/CLIP.png diff --git a/CLIP/README-Notes.md 
b/7-Text-to-Image/CLIP/README-Notes.md similarity index 100% rename from CLIP/README-Notes.md rename to 7-Text-to-Image/CLIP/README-Notes.md diff --git a/CLIP/test.py b/7-Text-to-Image/CLIP/test.py similarity index 100% rename from CLIP/test.py rename to 7-Text-to-Image/CLIP/test.py diff --git a/13-Text-to-Image/Notes.md b/7-Text-to-Image/Notes.md similarity index 100% rename from 13-Text-to-Image/Notes.md rename to 7-Text-to-Image/Notes.md diff --git a/13-Text-to-Image/README.md b/7-Text-to-Image/README.md similarity index 100% rename from 13-Text-to-Image/README.md rename to 7-Text-to-Image/README.md diff --git "a/Evaluation & Loss/0-Evaluation/KL\346\225\243\345\272\246.md" "b/8-Evaluation & Loss/0-Evaluation/KL\346\225\243\345\272\246.md" similarity index 100% rename from "Evaluation & Loss/0-Evaluation/KL\346\225\243\345\272\246.md" rename to "8-Evaluation & Loss/0-Evaluation/KL\346\225\243\345\272\246.md" diff --git a/Evaluation & Loss/0-Evaluation/Perceptual Loss.md b/8-Evaluation & Loss/0-Evaluation/Perceptual Loss.md similarity index 100% rename from Evaluation & Loss/0-Evaluation/Perceptual Loss.md rename to 8-Evaluation & Loss/0-Evaluation/Perceptual Loss.md diff --git a/Evaluation & Loss/0-Evaluation/REAMDME.md b/8-Evaluation & Loss/0-Evaluation/REAMDME.md similarity index 100% rename from Evaluation & Loss/0-Evaluation/REAMDME.md rename to 8-Evaluation & Loss/0-Evaluation/REAMDME.md diff --git a/Evaluation & Loss/0-Evaluation/loss.md b/8-Evaluation & Loss/0-Evaluation/loss.md similarity index 100% rename from Evaluation & Loss/0-Evaluation/loss.md rename to 8-Evaluation & Loss/0-Evaluation/loss.md diff --git a/Evaluation & Loss/0-Evaluation/perceptual_loss.py b/8-Evaluation & Loss/0-Evaluation/perceptual_loss.py similarity index 100% rename from Evaluation & Loss/0-Evaluation/perceptual_loss.py rename to 8-Evaluation & Loss/0-Evaluation/perceptual_loss.py diff --git "a/Evaluation & Loss/0-Evaluation/\350\267\235\347\246\273 Distance.md" "b/8-Evaluation 
& Loss/0-Evaluation/\350\267\235\347\246\273 Distance.md" similarity index 100% rename from "Evaluation & Loss/0-Evaluation/\350\267\235\347\246\273 Distance.md" rename to "8-Evaluation & Loss/0-Evaluation/\350\267\235\347\246\273 Distance.md" diff --git a/Evaluation & Loss/Loss function/Distance.md b/8-Evaluation & Loss/Loss function/Distance.md similarity index 100% rename from Evaluation & Loss/Loss function/Distance.md rename to 8-Evaluation & Loss/Loss function/Distance.md diff --git a/Evaluation & Loss/Loss function/README.md b/8-Evaluation & Loss/Loss function/README.md similarity index 100% rename from Evaluation & Loss/Loss function/README.md rename to 8-Evaluation & Loss/Loss function/README.md diff --git a/Evaluation & Loss/README.md b/8-Evaluation & Loss/README.md similarity index 100% rename from Evaluation & Loss/README.md rename to 8-Evaluation & Loss/README.md diff --git a/Evaluation & Loss/reg summary.md b/8-Evaluation & Loss/reg summary.md similarity index 100% rename from Evaluation & Loss/reg summary.md rename to 8-Evaluation & Loss/reg summary.md diff --git a/14-Animation/README.md b/Others/Animation/README.md similarity index 100% rename from 14-Animation/README.md rename to Others/Animation/README.md diff --git a/README.md b/README.md index 3424f22..ca7845a 100644 --- a/README.md +++ b/README.md @@ -4,14 +4,10 @@ [![Maintenance](https://img.shields.io/badge/Maintained%3F-yes-green.svg)](https://GitHub.com/Naereen/StrapDown.js/graphs/commit-activity) [![PR's Welcome](https://img.shields.io/badge/PRs-welcome-brightgreen.svg?style=flat)](http://makeapullrequest.com) -A collection of resources on 2D Generative Model. +A collection of resources on 2D Generative Models which utilize generator functions that map low-dimensional latent codes to high-dimensional data outputs. -A collection of resources on generative models which utilize generator functions that map low-dimensional latent codes to high-dimensional data outputs. 
- -We would define a prior distribution for the latent space, however this prior may not match the true and agnostic data manifold. It’s an obstacles yielding less accurate generation. - ## Contributing Feedback and contributions are welcome! If you think I have missed out on something (or) have any suggestions (papers, implementations and other resources), feel free to pull a request or leave an issue. I will release the [latex-pdf version]() in the future. :arrow_down:markdown format: @@ -24,46 +20,58 @@ Feedback and contributions are welcome! If you think I have missed out on someth :smile: Now you can use this [script](https://github.com/yzy1996/Python-Code/tree/master/Python%2BarXiv) to automatically generate the above text. -## Category -**3D-Aware Generation** has been moved to **[Learn 3D from 2D](https://github.com/yzy1996/Awesome-Learn-3D-From-2D)** + +## Contents **GAN related sources** has been moved to **[GAN](https://github.com/yzy1996/Awesome-GANs)** +**3D-Aware Generation** has been moved to **[Learn 3D from 2D](https://github.com/yzy1996/Awesome-Learn-3D-From-2D)** + + + +1. [Variational AutoEncoder (VAE)](./1-Variational-AutoEncoder-(VAE)) +2. [Diffusion Model](./2-Diffusion-Model) +3. [Energy-Based Model (EBM)](./3-Energy-Based-Model-(EBM)) +4. [Flow](./4-Flow) +5. [Representation Learning](./5-Representation-Learning) +6. [Disentangled Representation](./6-Disentangled-Representation) +7. [Text-to-Image](./7-Text-to-Image) +8. [Evaluation & Loss](./8-Evaluation-&-Loss) +9. [Others](./Others) + ## Introduction -photorealistic image synthesis +![img](https://raw.githubusercontent.com/yzy1996/Image-Hosting/master/generative-overview.png) -- high resolution cc -- content controllable +
中文介绍

-compositional nature of scenes +表征(representation)和重构(reconstruction)一直是不分家的两个研究话题。 -- individual objects' shapes -- appearances -- background +核心目标是重构,但就像我看到一幅画面,想要转述给另一个人,让他也想象出这个画面的场景,人会将这幅画抽象为一些特征,例如这幅画是自然风光,有很多树,颜色很绿,等等。然后另一个人再根据这些描述,通过自己预先知道的人生阅历,就能还原这幅画。或者就像公安在找犯人的时候,需要通过描述嫌疑人画像。是通过一些特征在刻画的。 +机器同样也需要这样一套范式,只不过可能并不像人一样的语意理解。为了可解释性,以及可控性,我们是希望机器能按照人能理解的一套特征来。 +

+
-Modern computer graphics (CG) techniques have achieved impressive results and are industry standard in gaming and movie productions. However, they are very hardware and computing expensive and require substantial repetitive labor. -Therefore, the ability to generate and manipulate photorealistic image content is a long-standing goal of computer vision and graphics. -There models try to model the real world by generating realistic samples from latent representations. +The ability to generate and manipulate photorealistic image content (**high resolution** & **content controllable**) is a long-standing goal of computer vision and graphics. We try to model the real world by generating realistic samples from latent representations. - divide deep generative models broadly into three categories: +Deep generative models can be divided broadly into three categories: -- Generative Adversarial Networks +- **Generative Adversarial Networks** > use discriminator networks that are trained to distinguish samples from generator networks and real examples -- Likelihood-based Model +- **Likelihood-based Model** > directly optimize the model log-likelihood or the evidence lower bound. 
@@ -75,178 +83,11 @@ There models try to model the real world by generating realistic samples from la - autoregressive models -- Energy-based Models +- **Energy-based Models** > estimate a scalar energy for each example that corresponds to an unnormalized log-probability - - -### VAE - -The majority of the research efforts on improving VAEs is dedicated to the statistical challenges, such as: - -- reducing the gap between approximate and true posterior distribution -- formulatig tighter bounds -- reducing the gradient noise -- extending VAEs to discrete variables -- tackling posterior collapse -- designing special network architectures - - previous work just borrows the architectures from the classification tasks - - - -VAEs maximize the mutual information between the input and latent variables, requiring the networks to retain the information content of the input data as much as possible. - -Information maximization in noisy channels: A variational approach -**[`NeurIPS 2017`]** - -Deep variational information bottleneck -**[`ICLR 2017`]** - - - - - -表征(representation)和重构(reconstruction)一直是不分家的两个研究话题。 - -核心目标是重构,但就像我看到一幅画面,想要转述给另一个人,让他也想象出这个画面的场景,人会将这幅画抽象为一些特征,例如这幅画是自然风光,有很多树,颜色很绿,等等。然后另一个人再根据这些描述,通过自己预先知道的人生阅历,就能还原这幅画/ - -或者就像公安在找犯人的时候,需要通过描述嫌疑人画像。是通过一些特征在刻画的。 - -机器同样也需要这样一套范式,只不过可能并不像人一样的语意理解 - -为了可解释性,以及可控性,我们是希望机器能按照人能理解的一套特征来 - -![image-20220612154943172](https://raw.githubusercontent.com/yzy1996/Image-Hosting/master/image-20220612154943172.png) - - - -AutoDecoder - - - - - -这里又需要提及一下重建loss - - - -## Introduction - -Generative models can be divided into two classes: - -- implicit generative models (IGMs) -- explicit generative models (EGMs) - - - -Our goal is to train a model $\mathbb{Q}_{\theta}$ which aims to approximate a target distribution $\mathbb{P}$ over a space $\mathcal{X} \subseteq \mathbb{R}^{d}$. 
- -Normally we define $\mathbb{Q}_{\theta}$ by a generator function $G_{\theta}: \mathcal{Z} \rightarrow \mathcal{X}$, implemented as a deep network with parameters $\theta$, where $\mathcal{Z}$ is a space of latent vectors, say $\mathcal{R}^{128}$. We assume a fixed Gaussian distribution on $\mathcal{Z}$, and call $\mathbb{Q}_{\theta}$ the distribution of $G_{\theta}(Z)$. - -The optimization process is to learn by minimizing a discrepancy $\mathcal{D}$ between distributions , with the property $\mathcal{D}(\mathbb{P}, \mathbb{Q}_{\theta}) \geq 0$ and $\mathcal{D}(\mathbb{P}, \mathbb{P})=0$. - - - -we can build loss $\mathcal{D}$ based on the Maximum Mean Discrepancy, -$$ -\operatorname{MMD}_{k}(\mathbb{P}, \mathbb{Q})=\sup _{f:\|f\|_{\mathcal{H}_{k}} \leq 1} \mathbb{E}_{X \sim \mathbb{P}}[f(X)]-\mathbb{E}_{Y \sim \mathbb{Q}}[f(Y)] -$$ -where $\mathcal{H}_k$ is the reproducing kernel Hilbert space with a kernel $k$. - - - - - -Wasserstein distance -$$ -\mathcal{W}(\mathbb{P}, \mathbb{Q})=\sup _{f:\|f\|_{\text {Lip }} \leq 1} \mathbb{E}_{X \sim \mathbb{P}}[f(X)]-\mathbb{E}_{Y \sim \mathbb{Q}}[f(Y)] -$$ - - - - - -There are three main methods: - -- VAE - -- GAN -- Flow - -They both learn from the training data and use the learned model to generate or predict new instances. - - - -相同点:都用到了随机噪声,然后度量噪声和真实数据的分布差异 - -不同点:GAN为了拟合数据分布,VAE为了找到数据的隐式表达,Flow建立训练数据和生成数据之间的关系 - -GAN 和 Flow 的输入和输出都是一一对应的,而VAE不是 - - - -训练的损失函数上: - -VAE最大化ELBO,其目的是要做最大似然估计,最大似然估计等价于最小化KL,但这个KL不是数据和噪声的KL,而是model给出的![[公式]](https://www.zhihu.com/equation?tex=p%28x%29)和数据所展示的![[公式]](https://www.zhihu.com/equation?tex=p%28x%29)之间的KL。 - -GAN是最小化JS,这个JS也是model给出的![[公式]](https://www.zhihu.com/equation?tex=p%28x%29)和数据所展示的![[公式]](https://www.zhihu.com/equation?tex=p%28x%29)之间的。 - -流模型训练也非常直接,也是最大似然估计。只不过因为流模型用的是可逆神经网络,因此,相比于其他两者,学习inference即学习隐含表示非常容易, - - - - -## GAN 2014 - -Generative Adversarial Networks (GANs) emerge as a powerful class of generative models. 
In particular, they are able to synthesize photorealistic images at high resolutions ($$1024 \times 1024$$) pixels which can not be distinguished. - - - -GANs and its variants - - - -train with adversarial methods, bypass the need of computing densities, at the expense of a good density estimation - -Generative adversarial networks (GANs) represent a zero-sum game between two machine players, a generator and a discriminator, designed to learn the distribution of data. - - - -> 只要能骗过Discriminator就好 - - - -## VAE 2013 - -at the cost of learning two neural networks - - - - - -## VAE-GAN - -combine VAE with GAN - - - -## Bijective GNN - - - -## Flow - - - -## Inverse Rendering / Graphics - -Given 2D image observations, these approaches aim to infer a 3D-structure-aware representation of the underlying scene that enables prior-based predictions about occluded parts. - -参考: -https://www.jeremyjordan.me/variational-autoencoders/ -https://www.jeremyjordan.me/autoencoders/ diff --git "a/\347\273\223\346\236\204.md" "b/\347\273\223\346\236\204.md" index 05e05cf..f4af4fb 100644 --- "a/\347\273\223\346\236\204.md" +++ "b/\347\273\223\346\236\204.md" @@ -261,3 +261,25 @@ Neural Radiance Field (NeRF) + + + + +Our goal is to train a model $\mathbb{Q}_{\theta}$ which aims to approximate a target distribution $\mathbb{P}$ over a space $\mathcal{X} \subseteq \mathbb{R}^{d}$. + +Normally we define $\mathbb{Q}_{\theta}$ by a generator function $G_{\theta}: \mathcal{Z} \rightarrow \mathcal{X}$, implemented as a deep network with parameters $\theta$, where $\mathcal{Z}$ is a space of latent vectors, say $\mathcal{R}^{128}$. We assume a fixed Gaussian distribution on $\mathcal{Z}$, and call $\mathbb{Q}_{\theta}$ the distribution of $G_{\theta}(Z)$. + +The optimization process is to learn by minimizing a discrepancy $\mathcal{D}$ between distributions , with the property $\mathcal{D}(\mathbb{P}, \mathbb{Q}_{\theta}) \geq 0$ and $\mathcal{D}(\mathbb{P}, \mathbb{P})=0$. 
+ + + +we can build loss $\mathcal{D}$ based on the Maximum Mean Discrepancy, +$$ +\operatorname{MMD}_{k}(\mathbb{P}, \mathbb{Q})=\sup _{f:\|f\|_{\mathcal{H}_{k}} \leq 1} \mathbb{E}_{X \sim \mathbb{P}}[f(X)]-\mathbb{E}_{Y \sim \mathbb{Q}}[f(Y)] +$$ +where $\mathcal{H}_k$ is the reproducing kernel Hilbert space with a kernel $k$. + +Wasserstein distance +$$ +\mathcal{W}(\mathbb{P}, \mathbb{Q})=\sup _{f:\|f\|_{\text {Lip }} \leq 1} \mathbb{E}_{X \sim \mathbb{P}}[f(X)]-\mathbb{E}_{Y \sim \mathbb{Q}}[f(Y)] +$$