Transfer learning: allow additional fragment types #421

Draft: wants to merge 2 commits into base: main
2 changes: 1 addition & 1 deletion alphadia/libtransform.py
@@ -320,7 +320,7 @@ def forward(self, input: SpecLibBase) -> SpecLibBase:

device = utils.get_torch_device(self.use_gpu)

- model_mgr = ModelManager(device=device)
+ model_mgr = ModelManager(device=device, charged_frag_types=charged_frag_types)

# will load other model than default generic
if self.peptdeep_model_type:
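For orientation (not part of the diff): a minimal sketch, with illustrative fragment types and charges, of how the charged_frag_types list passed to ModelManager can be built with alphabase's get_charged_frag_types helper.

from alphabase.peptide.fragment import get_charged_frag_types

# Illustrative inputs; the real values come from the alphaDIA configuration.
fragment_types = ["b", "y"]
max_charge = 2

charged_frag_types = get_charged_frag_types(fragment_types, max_charge)
# roughly ["b_z1", "b_z2", "y_z1", "y_z2"]

# The ModelManager above is then restricted to predicting exactly these ion series:
# model_mgr = ModelManager(device=device, charged_frag_types=charged_frag_types)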
2 changes: 2 additions & 0 deletions alphadia/outputtransform.py
@@ -421,6 +421,8 @@ def build_transfer_model(self, save=True):
max_lr=self.config["transfer_learning"]["max_lr"],
nce=self.config["transfer_learning"]["nce"],
instrument=self.config["transfer_learning"]["instrument"],
+ fragment_types=self.config["transfer_library"]["fragment_types"],
+ max_charge=self.config["transfer_library"]["max_charge"],
)
rt_stats = tune_mgr.finetune_rt(transfer_lib.precursor_df)
charge_stats = tune_mgr.finetune_charge(transfer_lib.precursor_df)
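As a rough illustration of the configuration this change reads (alphaDIA configs are typically YAML; only the key names visible in the diff are taken from the PR, the values and exact types are assumptions):

config = {
    "transfer_learning": {
        "max_lr": 0.0005,
        "nce": 25,
        "instrument": "Lumos",
    },
    "transfer_library": {
        "fragment_types": ["b", "y"],  # fragment type names, expanded per charge downstream
        "max_charge": 2,               # highest fragment charge to model
    },
}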
12 changes: 9 additions & 3 deletions alphadia/transferlearning/train.py
@@ -3,7 +3,7 @@
import numpy as np
import pandas as pd
import torch
- from alphabase.peptide.fragment import remove_unused_fragments
+ from alphabase.peptide.fragment import get_charged_frag_types, remove_unused_fragments
from alphabase.peptide.mobility import ccs_to_mobility_for_df, mobility_to_ccs_for_df
from alphabase.peptide.precursor import refine_precursor_df
from peptdeep.model.charge import ChargeModelForModAASeq
@@ -218,8 +218,9 @@ def __init__(
max_lr: float = 0.0005,
nce: float = 25,
instrument: str = "Lumos",
+ fragment_types: list[str] | None = None,
+ max_charge: int | None = None,
):
- super().__init__(mask_modloss, device)
self._test_interval = test_interval
self._train_fraction = train_fraction
self._validation_fraction = validation_fraction
@@ -233,11 +234,16 @@ def __init__(

self.device = device
self.early_stopping = EarlyStopping(patience=(lr_patience // test_interval) * 4)

+ self.charged_frag_types = (
+     get_charged_frag_types(fragment_types, max_charge)
+     if fragment_types
+     else None
+ )
assert (
self._train_fraction + self._validation_fraction + self._test_fraction
<= 1.0
), "The sum of the train, validation and test fractions should be less than or equal to 1.0"
+ super().__init__(mask_modloss, device, self.charged_frag_types)

Review comment (Collaborator):
I would prefer still calling this init at the top:

def ...(..):
    charged_frag_types = (
        get_charged_frag_types(fragment_types, max_charge)
        if fragment_types
        else None
    )
    super().__init__(mask_modloss, device, charged_frag_types)

    self.charged_frag_types = charged_frag_types
    ...

I did not find a proper resource, but I feel the parent init should be called "as early as possible"
https://stackoverflow.com/a/77975817
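
A minimal, self-contained illustration (not alphaDIA code) of why the ordering matters: if the parent constructor assigns a default to the same attribute, a subclass that sets the attribute before calling super() can have it silently overwritten.

class Parent:
    def __init__(self):
        self.charged_frag_types = None  # parent resets the attribute to a default

class SetsBeforeSuper(Parent):
    def __init__(self):
        self.charged_frag_types = ["b_z1", "y_z1"]
        super().__init__()  # overwrites the value set above

class SetsAfterSuper(Parent):
    def __init__(self):
        super().__init__()
        self.charged_frag_types = ["b_z1", "y_z1"]  # survives

print(SetsBeforeSuper().charged_frag_types)  # None
print(SetsAfterSuper().charged_frag_types)   # ['b_z1', 'y_z1']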


def _reset_frag_idx(self, df):
"""
480 changes: 480 additions & 0 deletions nbs/tutorial_nbs/additional_frags.ipynb

Large diffs are not rendered by default.
