Multiple options for repulsive normalization #497

Merged 1 commit on Mar 8, 2024
src/gnn_tracking/metrics/losses/metric_learning.py (14 changes: 13 additions & 1 deletion)
@@ -20,6 +20,7 @@ def _hinge_loss_components(
     p_attr: float,
     p_rep: float,
     n_hits_oi: int,
+    normalization: str,
 ) -> tuple[T, T]:
     eps = 1e-9

@@ -34,7 +35,13 @@ def _hinge_loss_components(
     # increasingly harder.
     # The maximal number of edges that can be in the radius graph is proportional
     # to the number of hits of interest, so we normalize by this number.
-    norm_rep = n_hits_oi + eps
+    if normalization == "n_rep_edges":
+        norm_rep = rep_edges.shape[1] + eps
+    elif normalization == "n_hits_oi":
+        norm_rep = n_hits_oi + eps
+    else:
+        msg = f"Normalization {normalization} not recognized."
+        raise ValueError(msg)
     v_rep = torch.sum(r_emb_hinge - torch.pow(dists_rep, p_rep)) / norm_rep

     return v_att, v_rep
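To make the two denominators concrete, here is a minimal standalone sketch (not part of the PR; the distances, the edge count, and n_hits_oi = 200 are made-up numbers) of how the repulsive term comes out under each option:

```python
import torch

# Hypothetical inputs: 5 repulsive edges whose embedded hit pairs are still
# closer than the hinge radius r_emb_hinge.
r_emb_hinge, p_rep, eps = 1.0, 1.0, 1e-9
dists_rep = torch.tensor([0.2, 0.4, 0.5, 0.7, 0.9])
rep_edges = torch.zeros(2, dists_rep.shape[0])  # only the edge count matters here
n_hits_oi = 200  # assumed number of hits of interest in the event

raw = torch.sum(r_emb_hinge - torch.pow(dists_rep, p_rep))

v_rep_per_edge = raw / (rep_edges.shape[1] + eps)  # "n_rep_edges": mean over repulsive edges
v_rep_per_hit = raw / (n_hits_oi + eps)            # "n_hits_oi": fixed denominator per event

print(v_rep_per_edge, v_rep_per_hit)
```

The comment above motivates the "n_hits_oi" choice: its denominator is fixed for a given event, so the term can keep shrinking as fewer close pairs remain, while "n_rep_edges" keeps a per-edge average available (it is the variant exercised by the new test_hinge_loss_legacy test below).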
@@ -52,6 +59,7 @@ def __init__(
         max_eta: float = 4.0,
         p_attr: float = 1.0,
         p_rep: float = 1.0,
+        rep_normalization: str = "n_hits_oi",
     ):
         """Loss for graph construction using metric learning.

@@ -64,6 +72,9 @@ def __init__(
             max_eta: maximum eta for particles of interest
             p_attr: Power for the attraction term (default 1: linear loss)
             p_rep: Power for the repulsion term (default 1: linear loss)
+            rep_normalization: Normalization for the repulsive term. Can be either
+                "n_rep_edges" (normalizes by the number of repulsive edges) or
+                "n_hits_oi" (normalizes by the number of hits of interest).
         """
         super().__init__()
         self.save_hyperparameters()
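A minimal usage sketch, assuming the class is importable from the module shown in the diff; the keyword and its two allowed values are taken directly from the changes above:

```python
from gnn_tracking.metrics.losses.metric_learning import (
    GraphConstructionHingeEmbeddingLoss,
)

# Default behaviour: normalize the repulsive term by the number of hits of interest.
loss_fn_default = GraphConstructionHingeEmbeddingLoss()

# Opt into per-edge normalization of the repulsive term instead.
loss_fn_per_edge = GraphConstructionHingeEmbeddingLoss(rep_normalization="n_rep_edges")

# Any other value raises a ValueError once the loss is evaluated in forward().
```

The added test below checks the non-default "n_rep_edges" path against fixed reference values.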
@@ -131,6 +142,7 @@ def forward(
             p_attr=self.hparams.p_attr,
             p_rep=self.hparams.p_rep,
             n_hits_oi=n_hits_oi,
+            normalization=self.hparams.rep_normalization,
         )
         losses = {
             "attractive": attr,
tests/test_losses.py (6 changes: 6 additions & 0 deletions)
@@ -197,6 +197,12 @@ def test_hinge_loss():
     )


+def test_hinge_loss_legacy():
+    assert get_ml_loss(
+        GraphConstructionHingeEmbeddingLoss(rep_normalization="n_rep_edges"), td1
+    ) == approx({"attractive": 0.7307405975481213, "repulsive": 0.34612957938781874})
+
+
 if __name__ == "__main__":
     for strategy in ["tiger", "rg"]:
         print(f"{strategy=}")