
Commit 71f75d6

fix formatting issues
1 parent 1bebdf2 commit 71f75d6

16 files changed: +58 -62 lines changed

.pre-commit-config.yaml

+3-5
@@ -55,7 +55,7 @@ repos:
         name: isort (python)
 
   - repo: https://github.com/ambv/black
-    rev: '23.10.1'
+    rev: '24.10.0'
     hooks:
       - id: black
 
@@ -66,21 +66,19 @@ repos:
         args: [--exit-non-zero-on-fix]
 
   - repo: https://github.com/pycqa/flake8
-    rev: '6.1.0'
+    rev: '7.1.1'
     hooks:
       - id: flake8
         additional_dependencies:
-          - flake8-docstrings
          - flake8-broken-line
          - flake8-bugbear
          - flake8-comprehensions
          - flake8-debugger
          - flake8-string-format
        args:
-          - --docstring-convention=numpy
          - --max-line-length=120
          - --extend-immutable-calls=Query,fastapi.Depends,fastapi.params.Depends
-          - --ignore=B008  # Ignore error for function calls in argument defaults
+          - --ignore=B008,E203  # Ignore error for function calls in argument defaults
        exclude: ^(__init__.py$|.*\/__init__.py$)
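Note on the flake8 args: --docstring-convention=numpy is removed together with the flake8-docstrings plugin that consumed it, and E203 joins the ignore list because Black formats slices whose bounds are expressions with spaces around the colon, which pycodestyle reports as E203 (whitespace before ':'). A minimal illustration of that conflict (hypothetical snippet, not from this repo):

    items = list(range(10))
    offset = 2
    # Black keeps the symmetric spaces around ':' here; without the E203
    # ignore, flake8 reports "E203 whitespace before ':'" on this line.
    window = items[offset + 1 : offset + 5]
    print(window)  # [3, 4]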

lambda/models/domain_objects.py

+6-6
@@ -98,7 +98,7 @@ class AutoScalingConfig(BaseModel):
     defaultInstanceWarmup: PositiveInt
     metricConfig: MetricConfig
 
-    @model_validator(mode="after")  # type: ignore
+    @model_validator(mode="after")
     def validate_auto_scaling_config(self) -> Self:
         """Validate autoScalingConfig values."""
         if self.minCapacity > self.maxCapacity:
@@ -115,7 +115,7 @@ class AutoScalingInstanceConfig(BaseModel):
     maxCapacity: Optional[PositiveInt] = None
     desiredCapacity: Optional[PositiveInt] = None
 
-    @model_validator(mode="after")  # type: ignore
+    @model_validator(mode="after")
     def validate_auto_scaling_instance_config(self) -> Self:
         """Validate autoScalingInstanceConfig values."""
         config_fields = [self.minCapacity, self.maxCapacity, self.desiredCapacity]
@@ -155,7 +155,7 @@ class ContainerConfig(BaseModel):
     healthCheckConfig: ContainerHealthCheckConfig
     environment: Optional[Dict[str, str]] = {}
 
-    @field_validator("environment")  # type: ignore
+    @field_validator("environment")
     @classmethod
     def validate_environment(cls, environment: Dict[str, str]) -> Dict[str, str]:
         """Validate that all keys in Dict are not empty."""
@@ -201,7 +201,7 @@ class CreateModelRequest(BaseModel):
     modelUrl: Optional[str] = None
     streaming: Optional[bool] = False
 
-    @model_validator(mode="after")  # type: ignore
+    @model_validator(mode="after")
     def validate_create_model_request(self) -> Self:
         """Validate whole request object."""
         # Validate that an embedding model cannot be set as streaming-enabled
@@ -252,7 +252,7 @@ class UpdateModelRequest(BaseModel):
     modelType: Optional[ModelType] = None
     streaming: Optional[bool] = None
 
-    @model_validator(mode="after")  # type: ignore
+    @model_validator(mode="after")
     def validate_update_model_request(self) -> Self:
         """Validate whole request object."""
         fields = [
@@ -273,7 +273,7 @@ def validate_update_model_request(self) -> Self:
             raise ValueError("Embedding model cannot be set with streaming enabled.")
         return self
 
-    @field_validator("autoScalingInstanceConfig")  # type: ignore
+    @field_validator("autoScalingInstanceConfig")
     @classmethod
     def validate_autoscaling_instance_config(cls, config: AutoScalingInstanceConfig) -> AutoScalingInstanceConfig:
         """Validate that the AutoScaling instance config has at least one positive value."""

lambda/models/lambda_functions.py

+14-14
@@ -59,32 +59,32 @@
 stepfunctions = boto3.client("stepfunctions", region_name=os.environ["AWS_REGION"], config=retry_config)
 
 
-@app.exception_handler(ModelNotFoundError)  # type: ignore
+@app.exception_handler(ModelNotFoundError)
 async def model_not_found_handler(request: Request, exc: ModelNotFoundError) -> JSONResponse:
     """Handle exception when model cannot be found and translate to a 404 error."""
     return JSONResponse(status_code=404, content={"message": str(exc)})
 
 
-@app.exception_handler(RequestValidationError)  # type: ignore
-async def validation_exception_handler(request: Request, exc: RequestValidationError):
+@app.exception_handler(RequestValidationError)
+async def validation_exception_handler(request: Request, exc: RequestValidationError) -> JSONResponse:
     """Handle exception when request fails validation and and translate to a 422 error."""
     return JSONResponse(
         status_code=422, content={"detail": jsonable_encoder(exc.errors()), "type": "RequestValidationError"}
     )
 
 
-@app.exception_handler(InvalidStateTransitionError)  # type: ignore
-@app.exception_handler(ModelAlreadyExistsError)  # type: ignore
-@app.exception_handler(ValueError)  # type: ignore
+@app.exception_handler(InvalidStateTransitionError)
+@app.exception_handler(ModelAlreadyExistsError)
+@app.exception_handler(ValueError)
 async def user_error_handler(
     request: Request, exc: Union[InvalidStateTransitionError, ModelAlreadyExistsError, ValueError]
 ) -> JSONResponse:
     """Handle errors when customer requests options that cannot be processed."""
     return JSONResponse(status_code=400, content={"message": str(exc)})
 
 
-@app.post(path="", include_in_schema=False)  # type: ignore
-@app.post(path="/")  # type: ignore
+@app.post(path="", include_in_schema=False)
+@app.post(path="/")
 async def create_model(create_request: CreateModelRequest) -> CreateModelResponse:
     """Endpoint to create a model."""
     create_handler = CreateModelHandler(
@@ -95,8 +95,8 @@ async def create_model(create_request: CreateModelRequest) -> CreateModelRespons
     return create_handler(create_request=create_request)
 
 
-@app.get(path="", include_in_schema=False)  # type: ignore
-@app.get(path="/")  # type: ignore
+@app.get(path="", include_in_schema=False)
+@app.get(path="/")
 async def list_models() -> ListModelsResponse:
     """Endpoint to list models."""
     list_handler = ListModelsHandler(
@@ -107,7 +107,7 @@ async def list_models() -> ListModelsResponse:
     return list_handler()
 
 
-@app.get(path="/{model_id}")  # type: ignore
+@app.get(path="/{model_id}")
 async def get_model(
     model_id: Annotated[str, Path(title="The unique model ID of the model to get")], request: Request
 ) -> GetModelResponse:
@@ -120,7 +120,7 @@ async def get_model(
     return get_handler(model_id=model_id)
 
 
-@app.put(path="/{model_id}")  # type: ignore
+@app.put(path="/{model_id}")
 async def update_model(
     model_id: Annotated[str, Path(title="The unique model ID of the model to update")],
     update_request: UpdateModelRequest,
@@ -134,7 +134,7 @@ async def update_model(
     return update_handler(model_id=model_id, update_request=update_request)
 
 
-@app.delete(path="/{model_id}")  # type: ignore
+@app.delete(path="/{model_id}")
 async def delete_model(
     model_id: Annotated[str, Path(title="The unique model ID of the model to delete")], request: Request
 ) -> DeleteModelResponse:
@@ -147,7 +147,7 @@ async def delete_model(
     return delete_handler(model_id=model_id)
 
 
-@app.get(path="/metadata/instances")  # type: ignore
+@app.get(path="/metadata/instances")
 async def get_instances() -> list[str]:
     """Endpoint to list available instances in this region."""
     return list(sess.get_service_model("ec2").shape_for("InstanceType").enum)
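Besides the decorator cleanups, this file gains a missing -> JSONResponse return annotation on validation_exception_handler, which mypy's disallow_untyped_defs / disallow_incomplete_defs settings require. A minimal sketch of the handler pattern, assuming FastAPI and a hypothetical exception class:

    from fastapi import FastAPI, Request
    from fastapi.responses import JSONResponse

    app = FastAPI()

    class ModelNotFoundError(Exception):
        """Hypothetical stand-in for the repo's domain exception."""

    @app.exception_handler(ModelNotFoundError)
    async def model_not_found_handler(request: Request, exc: ModelNotFoundError) -> JSONResponse:
        # Translate the domain error into an HTTP 404 payload.
        return JSONResponse(status_code=404, content={"message": str(exc)})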

lib/serve/rest-api/src/api/endpoints/v1/embeddings.py

+1-1
@@ -27,7 +27,7 @@
 router = APIRouter()
 
 
-@router.post(f"/{RestApiResource.EMBEDDINGS.value}")  # type: ignore
+@router.post(f"/{RestApiResource.EMBEDDINGS.value}")
 async def embeddings(request: EmbeddingsRequest) -> JSONResponse:
     """Text embeddings."""
     response = await handle_embeddings(request.dict())

lib/serve/rest-api/src/api/endpoints/v1/generation.py

+4-4
@@ -33,15 +33,15 @@
 router = APIRouter()
 
 
-@router.post(f"/{RestApiResource.GENERATE.value}")  # type: ignore
+@router.post(f"/{RestApiResource.GENERATE.value}")
 async def generate(request: GenerateRequest) -> JSONResponse:
     """Text generation."""
     response = await handle_generate(request.dict())
 
     return JSONResponse(content=response, status_code=200)
 
 
-@router.post(f"/{RestApiResource.GENERATE_STREAM.value}")  # type: ignore
+@router.post(f"/{RestApiResource.GENERATE_STREAM.value}")
 async def generate_stream(request: GenerateStreamRequest) -> StreamingResponse:
     """Text generation with streaming."""
     return StreamingResponse(
@@ -50,7 +50,7 @@ async def generate_stream(request: GenerateStreamRequest) -> StreamingResponse:
     )
 
 
-@router.post(f"/{RestApiResource.OPENAI_CHAT_COMPLETIONS.value}")  # type: ignore
+@router.post(f"/{RestApiResource.OPENAI_CHAT_COMPLETIONS.value}")
 async def openai_chat_completion_generate_stream(request: OpenAIChatCompletionsRequest) -> StreamingResponse:
     """Text generation with streaming."""
     return StreamingResponse(
@@ -59,7 +59,7 @@ async def openai_chat_completion_generate_stream(request: OpenAIChatCompletionsR
     )
 
 
-@router.post(f"/{RestApiResource.OPENAI_COMPLETIONS.value}")  # type: ignore
+@router.post(f"/{RestApiResource.OPENAI_COMPLETIONS.value}")
 async def openai_completion_generate_stream(request: OpenAICompletionsRequest) -> StreamingResponse:
     """Text generation with streaming."""
     return StreamingResponse(

lib/serve/rest-api/src/api/endpoints/v1/models.py

+4-4
@@ -33,7 +33,7 @@
 router = APIRouter()
 
 
-@router.get(f"/{RestApiResource.DESCRIBE_MODEL.value}")  # type: ignore
+@router.get(f"/{RestApiResource.DESCRIBE_MODEL.value}")
 async def describe_model(
     provider: str = Query(
         None,
@@ -52,7 +52,7 @@ async def describe_model(
     return JSONResponse(content=response, status_code=200)
 
 
-@router.get(f"/{RestApiResource.DESCRIBE_MODELS.value}")  # type: ignore
+@router.get(f"/{RestApiResource.DESCRIBE_MODELS.value}")
 async def describe_models(
     model_types: Optional[List[ModelType]] = Query(
         None,
@@ -69,7 +69,7 @@ async def describe_models(
     return JSONResponse(content=response, status_code=200)
 
 
-@router.get(f"/{RestApiResource.LIST_MODELS.value}")  # type: ignore
+@router.get(f"/{RestApiResource.LIST_MODELS.value}")
 async def list_models(
     model_types: Optional[List[ModelType]] = Query(
         None,
@@ -86,7 +86,7 @@ async def list_models(
     return JSONResponse(content=response, status_code=200)
 
 
-@router.get(f"/{RestApiResource.OPENAI_LIST_MODELS.value}")  # type: ignore
+@router.get(f"/{RestApiResource.OPENAI_LIST_MODELS.value}")
 async def openai_list_models() -> JSONResponse:
     """List models for OpenAI Compatibility. Only returns TEXTGEN models."""
     response = await handle_openai_list_models()

lib/serve/rest-api/src/api/endpoints/v2/litellm_passthrough.py

+1-3
@@ -82,9 +82,7 @@ def generate_response(iterator: Iterator[Union[str, bytes]]) -> Iterator[str]:
         yield f"{line}\n\n"
 
 
-@router.api_route(
-    "/{api_path:path}", methods=["GET", "POST", "OPTIONS", "PUT", "PATCH", "DELETE", "HEAD"]
-)  # type: ignore
+@router.api_route("/{api_path:path}", methods=["GET", "POST", "OPTIONS", "PUT", "PATCH", "DELETE", "HEAD"])
 async def litellm_passthrough(request: Request, api_path: str) -> Response:
     """
     Pass requests directly to LiteLLM. LiteLLM and deployed models will respond here directly.
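The decorator now fits on one line under the 120-character limit, so Black collapses it and the trailing # type: ignore goes with it. For reference, a minimal sketch of this catch-all proxy pattern, assuming FastAPI (the echo body is hypothetical; the real handler forwards to LiteLLM):

    from fastapi import APIRouter, Request, Response

    router = APIRouter()

    @router.api_route("/{api_path:path}", methods=["GET", "POST", "OPTIONS", "PUT", "PATCH", "DELETE", "HEAD"])
    async def passthrough(request: Request, api_path: str) -> Response:
        # {api_path:path} captures the entire remaining path, slashes included.
        return Response(content=f"would forward to: /{api_path}")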

lib/serve/rest-api/src/api/routes.py

+1-1
@@ -40,7 +40,7 @@
 )
 
 
-@router.get("/health")  # type: ignore
+@router.get("/health")
 async def health_check() -> JSONResponse:
     """Health check path.

lib/serve/rest-api/src/handlers/generation.py

+2-2
@@ -31,7 +31,7 @@ async def handle_generate(request_data: Dict[str, Any]) -> Dict[str, Any]:
     return response.dict()  # type: ignore
 
 
-@handle_stream_exceptions  # type: ignore
+@handle_stream_exceptions
 async def handle_generate_stream(request_data: Dict[str, Any]) -> AsyncGenerator[str, None]:
     """Handle for generate_stream endpoint."""
     model, model_kwargs, text = await validate_and_prepare_llm_request(request_data, RestApiResource.GENERATE_STREAM)
@@ -57,7 +57,7 @@ def parse_model_provider_names(model_string: str) -> Tuple[str, str]:
     return model_name, provider
 
 
-@handle_stream_exceptions  # type: ignore
+@handle_stream_exceptions
 async def handle_openai_generate_stream(
     request_data: Dict[str, Any], is_text_completion: bool = False
 ) -> AsyncGenerator[str, None]:

lib/serve/rest-api/src/lisa_serve/ecs/textgen/tgi.py

+12-10
@@ -211,16 +211,18 @@ async def openai_generate_stream(
                 object="text_completion" if is_text_completion else "chat.completion.chunk",
                 system_fingerprint=fingerprint,
                 choices=[
-                    OpenAICompletionsChoice(
-                        index=0,
-                        finish_reason=resp.details.finish_reason if resp.details else None,
-                        text=resp.token.text,
-                    )
-                    if is_text_completion
-                    else OpenAIChatCompletionsChoice(
-                        index=0,
-                        finish_reason=resp.details.finish_reason if resp.details else None,
-                        delta=OpenAIChatCompletionsDelta(content=resp.token.text, role="assistant"),
+                    (
+                        OpenAICompletionsChoice(
+                            index=0,
+                            finish_reason=resp.details.finish_reason if resp.details else None,
+                            text=resp.token.text,
+                        )
+                        if is_text_completion
+                        else OpenAIChatCompletionsChoice(
+                            index=0,
+                            finish_reason=resp.details.finish_reason if resp.details else None,
+                            delta=OpenAIChatCompletionsDelta(content=resp.token.text, role="assistant"),
+                        )
                     )
                 ],
             )
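This hunk is the one place Black 24 changes layout rather than just line width: its stable style wraps a multi-line conditional expression in its own parentheses, so the if/else inside choices reads as a single grouped list element. A self-contained illustration with hypothetical data:

    is_text_completion = True
    token = "hello"

    # Black 24 formats the multi-line ternary below with the wrapping parentheses.
    choices = [
        (
            {"text": token}  # completions-style choice
            if is_text_completion
            else {"delta": {"content": token, "role": "assistant"}}  # chat-style choice
        )
    ]
    print(choices)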

lib/serve/rest-api/src/main.py

+1-1
@@ -147,7 +147,7 @@ async def lifespan(app: FastAPI):  # type: ignore
 ##############
 
 
-@app.middleware("http")  # type: ignore
+@app.middleware("http")
 async def process_request(request: Request, call_next: Any) -> Any:
     """Middleware for processing all HTTP requests."""
     event = "process_request"

lib/serve/rest-api/src/utils/generate_litellm_config.py

+2-4
@@ -25,10 +25,8 @@
 secrets_client = boto3.client("secretsmanager", region_name=os.environ["AWS_REGION"])
 
 
-@click.command()  # type: ignore
-@click.option(
-    "-f", "--filepath", type=click.Path(exists=True, file_okay=True, dir_okay=False, writable=True)
-)  # type: ignore
+@click.command()
+@click.option("-f", "--filepath", type=click.Path(exists=True, file_okay=True, dir_okay=False, writable=True))
 def generate_config(filepath: str) -> None:
     """Read LiteLLM configuration and rewrite it with LISA-deployed model information."""
     with open(filepath, "r") as fp:

lisa-sdk/lisapy/main.py

+1-1
@@ -51,7 +51,7 @@ class Lisa(BaseModel):
 
     _session: Session
 
-    @field_validator("url")  # type: ignore
+    @field_validator("url")
     def validate_url(cls: "Lisa", v: str) -> str:
         """Validate URL is properly formatted."""
         url = v.rstrip("/")

lisa-sdk/tests/test_client.py

+4-4
@@ -22,13 +22,13 @@
 from lisapy.types import ModelKwargs, ModelType
 
 
-@pytest.fixture(scope="session")  # type: ignore
+@pytest.fixture(scope="session")
 def url(pytestconfig: pytest.Config) -> Any:
     """Get the url argument."""
     return pytestconfig.getoption("url")
 
 
-@pytest.fixture(scope="session")  # type: ignore
+@pytest.fixture(scope="session")
 def verify(pytestconfig: pytest.Config) -> Union[bool, Any]:
     """Get the verify argument."""
     if pytestconfig.getoption("verify") == "false":
@@ -114,7 +114,7 @@ def test_generate_stream(url: str, verify: Union[bool, str]) -> None:
     assert response.generated_tokens == 1
 
 
-@pytest.mark.asyncio  # type: ignore
+@pytest.mark.asyncio
 async def test_generate_async(url: str, verify: Union[bool, str]) -> None:
     """Generates a batch async response from a textgen.tgi model."""
     client = Lisa(url=url, verify=verify)
@@ -127,7 +127,7 @@ async def test_generate_async(url: str, verify: Union[bool, str]) -> None:
     assert response.generated_tokens == 1
 
 
-@pytest.mark.asyncio  # type: ignore
+@pytest.mark.asyncio
 async def test_generate_stream_async(url: str, verify: Union[bool, str]) -> None:
     """Generates a streaming async response from a textgen.tgi model."""
     client = Lisa(url=url, verify=verify)
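The pytest fixtures and asyncio markers lose their # type: ignore comments like everything else. A minimal sketch of the async test pattern, assuming the pytest-asyncio plugin is installed:

    import asyncio

    import pytest

    @pytest.mark.asyncio
    async def test_event_loop_runs() -> None:
        # pytest-asyncio supplies the event loop that executes this coroutine.
        assert await asyncio.sleep(0, result="ok") == "ok"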

pyproject.toml

+1-1
@@ -27,7 +27,7 @@ skip_glob = [
 [tool.mypy]
 ignore_missing_imports = true
 disallow_untyped_defs = true
-disallow_untyped_decorators = true
+disallow_untyped_decorators = false
 disallow_incomplete_defs = true
 disallow_any_unimported = false
 no_implicit_optional = true
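This is the switch that permits every # type: ignore removal above: with disallow_untyped_decorators = true, mypy refuses to apply a decorator it cannot type to an annotated function. A minimal reproduction of the error the old setting produced (hypothetical code):

    def untyped(func):  # no annotations, so mypy treats this decorator as untyped
        return func

    @untyped  # mypy (strict decorators): error: Untyped decorator makes function "greet" untyped
    def greet(name: str) -> str:
        return f"Hello, {name}"

    print(greet("LISA"))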
