Skip to content

Commit

Permalink
Removed nullcontext for python 3.6 compatibility
Browse files Browse the repository at this point in the history
  • Loading branch information
Thilina Rajapakse committed Aug 4, 2020
1 parent 9213f09 commit 90db61d
Show file tree
Hide file tree
Showing 69 changed files with 369 additions and 329 deletions.
26 changes: 8 additions & 18 deletions README.md
Original file line number Diff line number Diff line change
Expand Up @@ -1644,6 +1644,14 @@ Note, you must set `evaluate_generated_text` to `True` to evaluate generated seq
import logging

import pandas as pd
import sklearn

from simpletransformers.classification import ClassificationModel
from simpletransformers.classification.multi_modal_classification_model import \
MultiModalClassificationModel
from simpletransformers.experimental.classification import ClassificationModel
from simpletransformers.language_representation import RepresentationModel
from simpletransformers.seq2seq import Seq2SeqModel
from simpletransformers.t5 import T5Model

logging.basicConfig(level=logging.INFO)
Expand Down Expand Up @@ -1952,10 +1960,7 @@ The prediction data should be a list of strings.
The `Seq2SeqModel` must be initialized with `encoder_decoder_type="bart"` and `encoder_decoder_name` set to a pre-trained model name or the path to a saved model directory.

```python
import logging

import pandas as pd
from simpletransformers.seq2seq import Seq2SeqModel

logging.basicConfig(level=logging.INFO)
transformers_logger = logging.getLogger("transformers")
Expand Down Expand Up @@ -2025,10 +2030,7 @@ The `Seq2SeqModel` must be initialized with `encoder_decoder_type="marian"` and
Everything else is identical to the Bart model usage.

```python
import logging

import pandas as pd
from simpletransformers.seq2seq import Seq2SeqModel

logging.basicConfig(level=logging.INFO)
transformers_logger = logging.getLogger("transformers")
Expand Down Expand Up @@ -2074,10 +2076,7 @@ for en, de in zip(src, predictions):
#### Generic Encoder-Decoder minimal start

```python
import logging

import pandas as pd
from simpletransformers.seq2seq import Seq2SeqModel

logging.basicConfig(level=logging.INFO)
transformers_logger = logging.getLogger("transformers")
Expand Down Expand Up @@ -2794,7 +2793,6 @@ If `label_list` is not given, `num_labels` is required and the labels should be
Create a `MultiModalClassificationModel`.

```python
from simpletransformers.classification.multi_modal_classification_model import MultiModalClassificationModel


model = MultiModalClassificationModel("bert", "bert-base-uncased")
Expand Down Expand Up @@ -2929,7 +2927,6 @@ For more complete examples of how to use this component with downstream tasks re
### Minimal example for generating word embeddings
Generate a list of contextual word embeddings for every sentence in a list
```python
from simpletransformers.language_representation import RepresentationModel

sentences = ["Example sentence 1", "Example sentence 2"]
model = RepresentationModel(
Expand All @@ -2944,7 +2941,6 @@ assert word_vectors.shape == (2, 5, 768) # token vector for every token in each
### Minimal example for generating sentence embeddings
Same code as for generating word embeddings; the only difference is that we pass the `combine_strategy="mean"` parameter.
```python
from simpletransformers.language_representation import RepresentationModel
sentences = ["Example sentence 1", "Example sentence 2"]
model = RepresentationModel(
model_type="bert",
Expand Down Expand Up @@ -2973,8 +2969,6 @@ Regression can be used with either single sentence or sentence pair tasks.
#### Minimal Start for Regression

```python
from simpletransformers.classification import ClassificationModel
import pandas as pd


train_data = [
Expand Down Expand Up @@ -3039,7 +3033,6 @@ _[Back to Table of Contents](#table-of-contents)_
To use experimental features, import from `simpletransformers.experimental.X`

```python
from simpletransformers.experimental.classification import ClassificationModel
```

### Sliding Window For Long Sequences
Expand All @@ -3060,9 +3053,6 @@ Currently available on binary and multiclass classification models of the follow
Set `sliding_window` to `True` for the ClassificationModel to enable this feature.

```python
from simpletransformers.classification import ClassificationModel
import pandas as pd
import sklearn

# Train and Evaluation data needs to be in a Pandas Dataframe of two columns. The first column is the text with type str, and the second column is the label with type int.
train_data = [['Example sentence belonging to class 1' * 50, 1], ['Example sentence belonging to class 0', 0], ['Example 2 sentence belonging to class 0', 0]] + [['Example sentence belonging to class 0', 0] for i in range(12)]
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,6 @@

from utils import load_rte_data_file


# Preparing train data
train_df = load_rte_data_file("data/train.jsonl")
eval_df = load_rte_data_file("data/val.jsonl")
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2,9 +2,9 @@
from statistics import mean

import pandas as pd
import wandb
from sklearn.metrics import accuracy_score

import wandb
from simpletransformers.classification import ClassificationArgs, ClassificationModel
from utils import load_rte_data_file

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2,10 +2,10 @@
from statistics import mean, mode

import pandas as pd
import wandb
from sklearn.metrics import accuracy_score, f1_score
from sklearn.model_selection import train_test_split

import wandb
from simpletransformers.classification import ClassificationArgs, ClassificationModel
from utils import load_rte_data_file

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -2,13 +2,12 @@
from statistics import mean

import pandas as pd
import wandb
from sklearn.metrics import accuracy_score

import wandb
from simpletransformers.classification import ClassificationArgs, ClassificationModel
from utils import load_rte_data_file


logging.basicConfig(level=logging.INFO)
transformers_logger = logging.getLogger("transformers")
transformers_logger.setLevel(logging.WARNING)
Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3,10 +3,10 @@

import pandas as pd
import prettyprinter
import wandb
from prettyprinter import pprint
from sklearn.metrics import accuracy_score

import wandb
from simpletransformers.classification import ClassificationArgs, ClassificationModel
from utils import load_rte_data_file

Expand Down
Original file line number Diff line number Diff line change
Expand Up @@ -3,10 +3,10 @@

import pandas as pd
import prettyprinter
import wandb
from prettyprinter import pprint
from sklearn.metrics import accuracy_score, f1_score

import wandb
from simpletransformers.classification import ClassificationArgs, ClassificationModel
from utils import load_rte_data_file

Expand Down
7 changes: 2 additions & 5 deletions examples/hyperparameter tuning/sweeps.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,12 +2,9 @@

import pandas as pd
import sklearn

import wandb
from simpletransformers.classification import (
ClassificationArgs,
ClassificationModel,
)

from simpletransformers.classification import ClassificationArgs, ClassificationModel

sweep_config = {
"method": "bayes", # grid, random
Expand Down
1 change: 0 additions & 1 deletion examples/language_generation/data_prep.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
import pandas as pd


df = pd.read_csv("data/cs.AI.tsv", sep="\t")
abstracts = df["abstract"].tolist()

Expand Down
2 changes: 1 addition & 1 deletion examples/language_generation/fine_tune.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
from simpletransformers.language_modeling import LanguageModelingModel
import logging

from simpletransformers.language_modeling import LanguageModelingModel

logging.basicConfig(level=logging.INFO)
transformers_logger = logging.getLogger("transformers")
Expand Down
2 changes: 1 addition & 1 deletion examples/language_generation/generate.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
import logging
from simpletransformers.language_generation import LanguageGenerationModel

from simpletransformers.language_generation import LanguageGenerationModel

logging.basicConfig(level=logging.INFO)
transformers_logger = logging.getLogger("transformers")
Expand Down
4 changes: 2 additions & 2 deletions examples/language_generation/train_new_lm.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,7 @@
from simpletransformers.language_modeling import LanguageModelingModel
import logging
import argparse
import logging

from simpletransformers.language_modeling import LanguageModelingModel

logging.basicConfig(level=logging.INFO)
transformers_logger = logging.getLogger("transformers")
Expand Down
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
import pandas as pd
from sklearn.metrics import classification_report
from sklearn.linear_model import RidgeClassifier
from sklearn.metrics import classification_report

from simpletransformers.language_representation import RepresentationModel

train_data = [["Example sentence belonging to class 1", 1], ["Example sentence belonging to class 0", 0]]
Expand Down
Original file line number Diff line number Diff line change
@@ -1,8 +1,10 @@
from os.path import dirname, join

import pandas as pd
from sklearn.metrics import classification_report
from sklearn.linear_model import RidgeClassifier
from sklearn.metrics import classification_report

from simpletransformers.language_representation import RepresentationModel
from os.path import dirname, join

project_root = dirname(dirname(dirname(dirname(__file__)))) # path to root of the project

Expand Down
5 changes: 3 additions & 2 deletions examples/named_entity_recognition/named_entity_recognition.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,9 @@
import pandas as pd
from simpletransformers.ner import NERModel
import numpy as np
import pandas as pd
from scipy.special import softmax

from simpletransformers.ner import NERModel

# Creating train_df and eval_df for demonstration
train_data = [
[0, "Simple", "B-MISC"],
Expand Down
1 change: 1 addition & 0 deletions examples/seq2seq/minimal_seq2seq.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,7 @@
import logging

import pandas as pd

from simpletransformers.seq2seq import Seq2SeqModel

logging.basicConfig(level=logging.INFO)
Expand Down
3 changes: 2 additions & 1 deletion examples/t5/mixed_tasks/test.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,10 +6,11 @@
import numpy as np
import pandas as pd
from scipy.stats import pearsonr, spearmanr
from simpletransformers.t5 import T5Model
from sklearn.metrics import accuracy_score, f1_score
from transformers.data.metrics.squad_metrics import compute_exact, compute_f1

from simpletransformers.t5 import T5Model


def f1(truths, preds):
return mean([compute_f1(truth, pred) for truth, pred in zip(truths, preds)])
Expand Down
2 changes: 1 addition & 1 deletion examples/t5/mixed_tasks/train.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,6 @@
import pandas as pd
from simpletransformers.t5 import T5Model

from simpletransformers.t5 import T5Model

train_df = pd.read_csv("data/train.tsv", sep="\t").astype(str)
eval_df = pd.read_csv("data/eval.tsv", sep="\t").astype(str)
Expand Down
5 changes: 3 additions & 2 deletions examples/t5/training_on_a_new_task/data_prep.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,8 @@
import pandas as pd
import gzip
from sklearn.model_selection import train_test_split
import os

import pandas as pd
from sklearn.model_selection import train_test_split
from tqdm.auto import tqdm


Expand Down
1 change: 0 additions & 1 deletion examples/t5/training_on_a_new_task/predict.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,5 @@
from simpletransformers.t5 import T5Model


model_args = {
"reprocess_input_data": True,
"overwrite_output_dir": True,
Expand Down
5 changes: 3 additions & 2 deletions examples/t5/training_on_a_new_task/test.py
Original file line number Diff line number Diff line change
@@ -1,7 +1,8 @@
from simpletransformers.t5 import T5Model
import pandas as pd
from pprint import pprint

import pandas as pd

from simpletransformers.t5 import T5Model

model_args = {
"reprocess_input_data": True,
Expand Down
1 change: 0 additions & 1 deletion examples/t5/training_on_a_new_task/train.py
Original file line number Diff line number Diff line change
Expand Up @@ -2,7 +2,6 @@

from simpletransformers.t5 import T5Model


train_df = pd.read_csv("data/train_df.tsv", sep="\t").astype(str)
eval_df = pd.read_csv("data/eval_df.tsv", sep="\t").astype(str)

Expand Down
1 change: 1 addition & 0 deletions examples/text_classification/binary_classification.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import pandas as pd

from simpletransformers.classification import ClassificationModel

# Train and Evaluation data needs to be in a Pandas Dataframe of two columns. The first column is the text with type str, and the second column is the label with type int.
Expand Down
2 changes: 1 addition & 1 deletion examples/text_classification/lazy_loading_regression.py
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
import os

import pandas as pd
from simpletransformers.classification import ClassificationModel

from simpletransformers.classification import ClassificationModel

train_data = [
["Example sentence belonging to class 1", "Yep, this is 1", 0.8],
Expand Down
1 change: 1 addition & 0 deletions examples/text_classification/multiclass_classification.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import pandas as pd

from simpletransformers.classification import ClassificationModel

# Train and Evaluation data needs to be in a Pandas Dataframe containing at least two columns. If the Dataframe has a header, it should contain a 'text' and a 'labels' column. If no header is present, the Dataframe should contain at least two columns, with the first column being the text with type str, and the second column being the label with type int.
Expand Down
1 change: 1 addition & 0 deletions examples/text_classification/multilabel_classification.py
Original file line number Diff line number Diff line change
@@ -1,4 +1,5 @@
import pandas as pd

from simpletransformers.classification import MultiLabelClassificationModel

# Train and Evaluation data needs to be in a Pandas Dataframe containing at least two columns, a 'text' and a 'labels' column. The `labels` column should contain multi-hot encoded lists.
Expand Down
Original file line number Diff line number Diff line change
@@ -1,8 +1,8 @@
import sys

import pandas as pd
from simpletransformers.classification import ClassificationModel

from simpletransformers.classification import ClassificationModel

prefix = "data/"

Expand Down
2 changes: 1 addition & 1 deletion requirements-dev.txt
Original file line number Diff line number Diff line change
Expand Up @@ -6,4 +6,4 @@ pytest==5.1.3
# lint/format/types
black==19.10b0
flake8==3.7.8
pytype==2019.7.11
pytype==2019.7.11
2 changes: 1 addition & 1 deletion setup.py
Original file line number Diff line number Diff line change
Expand Up @@ -5,7 +5,7 @@

setup(
name="simpletransformers",
version="0.46.5",
version="0.46.6",
author="Thilina Rajapakse",
author_email="[email protected]",
description="An easy-to-use wrapper library for the Transformers library.",
Expand Down
8 changes: 5 additions & 3 deletions simpletransformers/classification/__init__.py
Original file line number Diff line number Diff line change
@@ -1,6 +1,8 @@
from simpletransformers.classification.classification_model import ClassificationModel
from simpletransformers.classification.multi_label_classification_model import MultiLabelClassificationModel
from simpletransformers.classification.multi_modal_classification_model import MultiModalClassificationModel
from simpletransformers.config.model_args import ClassificationArgs
from simpletransformers.config.model_args import MultiLabelClassificationArgs
from simpletransformers.config.model_args import MultiModalClassificationArgs
from simpletransformers.config.model_args import (
ClassificationArgs,
MultiLabelClassificationArgs,
MultiModalClassificationArgs,
)
Loading

0 comments on commit 90db61d

Please sign in to comment.