Start deprecating old stuff #143

Merged: 7 commits, Mar 25, 2024
45 changes: 0 additions & 45 deletions Dockerfile

This file was deleted.

5 changes: 1 addition & 4 deletions deepaas/api/v2/predict.py
@@ -16,7 +16,6 @@

from aiohttp import web
import aiohttp_apispec
import marshmallow
from webargs import aiohttpparser
import webargs.core

@@ -39,9 +38,7 @@ def _get_handler(model_name, model_obj):
accept = aux.get("accept", None)
if accept:
accept.validate.choices.append("*/*")
# If no default value use first possible choice:
if isinstance(accept.missing, marshmallow.utils._Missing):
accept.missing = accept.validate.choices[0]
accept.load_default = accept.validate.choices[0]
accept.location = "headers"

handler_args = webargs.core.dict2schema(aux)
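
Context for this hunk: marshmallow 3.13 renamed the `Field` attribute `missing` to `load_default` (the old name is deprecated and slated for removal in marshmallow 4), so the `marshmallow.utils._Missing` check is no longer needed and the default can be set unconditionally. A minimal sketch of the old versus new spelling, using an illustrative field rather than the real DEEPaaS schema:

```python
# Minimal sketch, not the DEEPaaS code itself.
from marshmallow import fields, validate

accept = fields.Str(
    validate=validate.OneOf(["application/json", "text/plain", "image/png"]),
)

# Pre-3.13 spelling (deprecated):  accept.missing = accept.validate.choices[0]
# Spelling this PR switches to:
accept.load_default = accept.validate.choices[0]
accept.metadata["location"] = "headers"
```
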
9 changes: 5 additions & 4 deletions deepaas/cmd/cli.py
@@ -34,9 +34,11 @@
from oslo_log import log

# from deepaas import config
from deepaas import config
from deepaas.model import loading
from deepaas.model.v2 import wrapper as v2_wrapper

CONF = config.CONF

debug_cli = False

@@ -152,15 +154,14 @@ def _get_model_name(model_name=None):
sys.stderr.write(
"[ERROR]: There are several models available ({}).\n"
"You have to choose one and set it in the DEEPAAS_V2_MODEL "
"environment setting.\n".format(list(models.keys()))
"environment variable or using the --mode-name option"
".\n".format(list(models.keys()))
)
sys.exit(1)


# Get the model name
model_name = None
if "DEEPAAS_V2_MODEL" in os.environ:
model_name = os.environ["DEEPAAS_V2_MODEL"]
model_name = CONF.model_name

model_name, model_obj = _get_model_name(model_name)

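
A hedged sketch of the selection behaviour the CLI moves to (names simplified, not the actual `_get_model_name` implementation): the model name now comes from oslo.config's `CONF.model_name` instead of a direct `os.environ` lookup, and an ambiguous choice still aborts with a hint about `DEEPAAS_V2_MODEL` / `--model-name`:

```python
# Simplified sketch of the model-selection fallback; function and variable
# names here are illustrative, not the ones used in deepaas/cmd/cli.py.
import sys


def pick_model(model_name, models):
    """models maps model names to loaded model objects."""
    if model_name:
        if model_name not in models:
            sys.exit("[ERROR]: Model '%s' is not installed." % model_name)
        return model_name, models[model_name]
    if len(models) == 1:
        name = next(iter(models))
        return name, models[name]
    sys.exit(
        "[ERROR]: There are several models available (%s). Set the "
        "DEEPAAS_V2_MODEL environment variable or use --model-name."
        % list(models)
    )
```
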
11 changes: 0 additions & 11 deletions deepaas/cmd/execute.py
@@ -26,17 +26,6 @@
from deepaas.model.v2.wrapper import UploadedFile

cli_opts = [
cfg.StrOpt(
"model-name",
help="""
Add the name of the model from which you want
to obtain the prediction.
If there are multiple models installed and youd don't
specify the name of the one you want to use the program will fail.
If there is only one model installed, that will be used
to make the prediction.
""",
),
cfg.StrOpt(
"input-file",
short="i",
14 changes: 14 additions & 0 deletions deepaas/config.py
@@ -15,6 +15,7 @@
# under the License.

import logging
import os
import warnings

from oslo_config import cfg
@@ -85,6 +86,19 @@
Pre-warm the modules (eg. load models, do preliminary checks, etc). You might
want to disable this option if DEEPaaS is loading more than one module because
you risk getting out of memory errors.
""",
),
cfg.StrOpt(
"model-name",
default=os.environ.get("DEEPAAS_V2_MODEL", ""),
help="""
Specify the model to be used. If not specified, DEEPaaS will serve all the models that
are available. If specified, DEEPaaS will serve only the specified model. You can also
use the DEEPAAS_V2_MODEL environment variable.

WARNING: Serving multiple models is deprecated and will be removed in the future;
therefore it is strongly suggested that you specify the model you want to serve,
or ensure that only one model is available.
""",
),
]
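
A small sketch of how the new option is expected to resolve, assuming standard oslo.config behaviour: the environment variable only seeds the default, so an explicit `--model-name` on the command line (or a value in the configuration file) still takes precedence. The standalone `ConfigOpts` instance below is just for illustration:

```python
# Illustrative precedence check for the new "model-name" option.
import os

from oslo_config import cfg

conf = cfg.ConfigOpts()
conf.register_cli_opts(
    [cfg.StrOpt("model-name", default=os.environ.get("DEEPAAS_V2_MODEL", ""))]
)

conf(["--model-name", "my-model"])  # an explicit flag overrides the env-var default
print(conf.model_name)              # -> "my-model"
```
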
19 changes: 19 additions & 0 deletions deepaas/exceptions.py
@@ -0,0 +1,19 @@
# -*- coding: utf-8 -*-

# Copyright 2018 Spanish National Research Council (CSIC)
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.


class ModuleNotFoundError(Exception):
"""Module not found error."""
24 changes: 24 additions & 0 deletions deepaas/model/loading.py
@@ -14,13 +14,37 @@
# License for the specific language governing permissions and limitations
# under the License.

from deepaas import exceptions

import stevedore

NAMESPACES = {
"v2": "deepaas.v2.model",
}


def get_model_by_name(name, version):
"""Get a model by its name.

:param name: The name of the model.
:type name: str
:param version: The version of the model.
:type version: str

:returns: The model.
:rtype: object
"""
mgr = stevedore.NamedExtensionManager(
namespace=NAMESPACES.get(version),
names=[name],
)
if name not in mgr.names():
raise exceptions.ModuleNotFoundError(
"Model '%s' not found in namespace '%s'" % (name, NAMESPACES.get(version))
)
return mgr[name].plugin


def get_available_model_names(version):
"""Get the names of all the models that are available on the system.

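
For reference, a hedged sketch of what `get_model_by_name` does under the hood: stevedore resolves setuptools entry points registered under the `deepaas.v2.model` namespace, and `NamedExtensionManager` silently skips names it cannot load, which is why the explicit membership check is needed before raising. The entry-point name `my_model` below is hypothetical:

```python
# Illustrative lookup of a hypothetical "my_model" entry point.
import stevedore

mgr = stevedore.NamedExtensionManager(
    namespace="deepaas.v2.model",
    names=["my_model"],
)
if "my_model" not in mgr.names():
    raise RuntimeError("entry point 'my_model' not found in 'deepaas.v2.model'")
model = mgr["my_model"].plugin  # the object the entry point targets
```
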
40 changes: 38 additions & 2 deletions deepaas/model/v2/__init__.py
@@ -14,14 +14,20 @@
# License for the specific language governing permissions and limitations
# under the License.

import warnings

from oslo_log import log

from deepaas import config
from deepaas import exceptions
from deepaas.model import loading
from deepaas.model.v2 import test
from deepaas.model.v2 import wrapper

LOG = log.getLogger(__name__)

CONF = config.CONF

# Model registry
MODELS = {}
MODELS_LOADED = False
@@ -35,17 +41,47 @@ def register_models(app):
return

try:
for name, model in loading.get_available_models("v2").items():
MODELS[name] = wrapper.ModelWrapper(name, model, app)
if CONF.model_name:
MODELS[CONF.model_name] = wrapper.ModelWrapper(
CONF.model_name,
loading.get_model_by_name(CONF.model_name, "v2"),
app,
)
else:
for name, model in loading.get_available_models("v2").items():
MODELS[name] = wrapper.ModelWrapper(name, model, app)
except exceptions.ModuleNotFoundError:
LOG.error("Model not found: %s", CONF.model_name)
raise
except Exception as e:
# We do not raise here, as we have not yet removed the deprecated loading of the
# test module; this fallback should go away once that deprecated path is removed.
LOG.warning("Error loading models: %s", e)
warnings.warn(
"Error loading models, using test model. This will be deprecated soon.",
DeprecationWarning,
)

if MODELS:
if len(MODELS) > 1:
# Loading several models will be deprecated in the future
warn_msg = "Loading several models is deprecated."
warnings.warn(warn_msg, DeprecationWarning)
LOG.warning(warn_msg)

MODELS_LOADED = True
return

if not MODELS:
# Raise deprecation warning
warn_msg = (
"Using the built-in test model is deprecated, if you are testing the "
"API, please use the demo_app instead. "
"Check https://github.com/deephdc/demo_app for more information.",
)
warnings.warn(warn_msg, DeprecationWarning)
LOG.info("No models found in V2, loading test model")
LOG.warning(warn_msg)
MODELS["deepaas-test"] = wrapper.ModelWrapper(
"deepaas-test", test.TestModel(), app
)
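
A short sketch of the dual-channel deprecation pattern this hunk adopts (the helper name is illustrative; DEEPaaS emits the two calls inline): the same message goes to `warnings.warn` for programmatic consumers and to the log so operators see it too:

```python
# Illustrative helper, not part of deepaas itself.
import logging
import warnings

LOG = logging.getLogger(__name__)


def deprecate(msg):
    warnings.warn(msg, DeprecationWarning, stacklevel=2)
    LOG.warning(msg)


deprecate("Loading several models is deprecated.")
```
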
43 changes: 28 additions & 15 deletions deepaas/model/v2/test.py
@@ -111,40 +111,53 @@ def train(self, *args, **kwargs):
def get_predict_args(self):
return {
"data": fields.Field(
description="Data file to perform inference.",
metadata={
"description": "Data file to perform inference.",
"location": "form",
"type": "file",
},
required=True,
location="form",
type="file",
),
"parameter": fields.Int(
description="This is a parameter for prediction", required=True
metadata={"description": "This is a parameter for prediction"},
required=True,
),
"parameter_three": fields.Str(
description=(
"This is a parameter that forces its value to "
"be one of the choices declared in 'enum'"
),
enum=["foo", "bar"],
metadata={
"description": (
"This is a parameter that forces its value to "
"be one of the choices declared in 'enum'"
),
"enum": ["foo", "bar"],
},
validate=validate.OneOf(["foo", "bar"]),
),
"accept": fields.Str(
description=(
"Media type(s) that is/are acceptable for the " "response."
),
metadata={
"description": (
"Media type(s) that is/are acceptable for the " "response."
),
"location": "headers",
},
validate=validate.OneOf(
["application/json", "text/plain", "image/png"]
),
location="headers",
),
}

def get_train_args(self):
return {
"sleep": fields.Int(
required=True,
descripton="This is a integer parameter, and it is " "a required one.",
metadata={
"descripton": (
"This is a integer parameter, and it is " "a required one."
),
},
),
"parameter_two": fields.Str(
metadata={"description": "This is a string parameter."}
),
"parameter_two": fields.Str(description="This is a string parameter."),
}

def get_metadata(self):
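
The rewrite above follows the marshmallow 3 convention this PR migrates to: free-form keyword arguments on fields (`description`, `location`, `enum`, ...) are deprecated in favour of the `metadata` dict (and slated for removal in marshmallow 4), while actual constraints stay in `validate`. A minimal hedged example of the new style:

```python
# Minimal sketch of a field declared in the marshmallow 3 style.
from marshmallow import fields, validate

accept = fields.Str(
    metadata={
        "description": "Media type(s) that is/are acceptable for the response.",
        "location": "headers",
    },
    validate=validate.OneOf(["application/json", "text/plain", "image/png"]),
)
```
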
1 change: 0 additions & 1 deletion deepaas/tests/base.py
@@ -32,7 +32,6 @@


class TestCase(testtools.TestCase, aiohttp.test_utils.AioHTTPTestCase):

"""Base unit test class."""

async def get_application(self):