Skip to content
New issue

Have a question about this project? Sign up for a free GitHub account to open an issue and contact its maintainers and the community.

By clicking “Sign up for GitHub”, you agree to our terms of service and privacy statement. We’ll occasionally send you account related emails.

Already on GitHub? Sign in to your account

feat: 음식 이미지 탐지 API 지연시간(Latency) 최적화 진행 후 개발환경 배포 #111

Merged
merged 7 commits into from
Feb 5, 2025
91 changes: 2 additions & 89 deletions server/apis/food_analysis.py
Original file line number Diff line number Diff line change
Expand Up @@ -6,17 +6,15 @@
from apscheduler.schedulers.background import BackgroundScheduler
from apscheduler.events import EVENT_JOB_EXECUTED, EVENT_JOB_ERROR
from operator import itemgetter
from langchain_openai import ChatOpenAI
from langchain.prompts import PromptTemplate
from langchain_core.runnables import RunnablePassthrough
from langchain_core.output_parsers import JsonOutputParser, StrOutputParser
from core.config import settings
from db.database import get_db
from db.models import AnalysisStatus
from db.crud import (create_eat_habits, get_user_data, get_all_member_id, get_last_weekend_meals,
add_analysis_status, update_analysis_status, create_diet_analysis)
from utils.file_handler import read_prompt
from utils.scheduler import scheduler_listener
from templates.prompt_template import (create_advice_chain, create_nutrition_analysis_chain, create_improvement_chain,
create_diet_recommendation_chain, create_summarize_chain, create_evaluation_chain)
from errors.server_exception import ExternalAPIError, FileAccessError, QueryError
from logs.logger_config import get_logger

Expand All @@ -26,10 +24,6 @@

# 공용 로거
logger = get_logger()

# Langchain 모델 설정: analysis / other
llm = ChatOpenAI(model='gpt-4o-mini', temperature=0, max_completion_tokens=250)
analysis_llm = ChatOpenAI(model='gpt-4o', temperature=0, max_completion_tokens=250)

# 정량적 평가 기준(임계값)
THRESHOLD_RELEVANCE= 3.0
Expand Down Expand Up @@ -89,86 +83,6 @@ def weight_predict(user_data: dict) -> str:
else:
return '감소'

# Prompt template factory
def create_prompt_template(file_path, input_variables):
    """Load a prompt file from disk and wrap it in a LangChain PromptTemplate."""
    return PromptTemplate(
        template=read_prompt(file_path),
        input_variables=input_variables,
    )

# Chain: dietary-habit advice (prompt -> llm -> JSON)
def create_advice_chain():
    """Return the runnable chain that produces dietary advice as parsed JSON."""
    advice_vars = [
        "gender", "age", "height", "weight", "physical_activity_index",
        "carbohydrate", "protein", "fat", "carbo_avg", "protein_avg", "fat_avg",
    ]
    template = create_prompt_template(
        os.path.join(settings.PROMPT_PATH, "diet_advice.txt"),
        input_variables=advice_vars,
    )
    return template | llm | JsonOutputParser()

# Chain: overall nutrient analysis (prompt -> analysis_llm -> text)
def create_nutrition_analysis_chain():
    """Return the runnable chain that analyzes the member's overall nutrition."""
    analysis_vars = [
        "gender", "age", "height", "weight",
        "physical_activity_index", "carbohydrate", "protein", "fat",
        "calorie", "sodium", "dietary_fiber", "sugars",
        "carbo_avg", "protein_avg", "fat_avg", "tdee",
    ]
    template = create_prompt_template(
        os.path.join(settings.PROMPT_PATH, "nutrition_analysis.txt"),
        input_variables=analysis_vars,
    )
    return template | analysis_llm | StrOutputParser()

# Chain: diet improvement points (prompt -> analysis_llm -> text)
def create_improvement_chain():
    """Return the runnable chain that suggests concrete diet improvements."""
    improvement_vars = [
        "carbohydrate", "carbo_avg", "protein", "protein_avg",
        "fat", "fat_avg", "calorie", "tdee", "nutrition_analysis", "target_weight",
    ]
    template = create_prompt_template(
        os.path.join(settings.PROMPT_PATH, "diet_improvement.txt"),
        input_variables=improvement_vars,
    )
    return template | analysis_llm | StrOutputParser()

# Chain: personalized diet recommendation (prompt -> analysis_llm -> text)
def create_diet_recommendation_chain():
    """Return the runnable chain that builds a custom diet recommendation."""
    recommendation_vars = ["diet_improvement", "etc", "target_weight"]
    template = create_prompt_template(
        os.path.join(settings.PROMPT_PATH, "custom_recommendation.txt"),
        input_variables=recommendation_vars,
    )
    return template | analysis_llm | StrOutputParser()

# Chain: diet-analysis summary (prompt -> llm -> text)
def create_summarize_chain():
    """Return the runnable chain that summarizes the preceding analysis steps."""
    summary_vars = [
        "nutrition_analysis", "diet_improvement", "custom_recommendation",
    ]
    template = create_prompt_template(
        os.path.join(settings.PROMPT_PATH, "diet_summary.txt"),
        input_variables=summary_vars,
    )
    return template | llm | StrOutputParser()

# Chain: evaluation of the generated analysis (prompt -> llm -> JSON)
def create_evaluation_chain():
    """Return the runnable chain that scores the generated analysis as JSON."""
    evaluation_vars = [
        "gender", "age", "height", "weight",
        "physical_activity_index", "etc", "target_weight",
        "carbohydrate", "protein", "fat",
        "calorie", "sodium", "dietary_fiber", "sugars", "tdee",
        "nutrition_analysis", "diet_improvement", "custom_recommendation", "diet_summary",
    ]
    template = create_prompt_template(
        os.path.join(settings.PROMPT_PATH, "diet_eval.txt"),
        input_variables=evaluation_vars,
    )
    return template | llm | JsonOutputParser()

# Analysis Multi-Chain 연결
def create_multi_chain(input_data):
try:
Expand All @@ -177,7 +91,6 @@ def create_multi_chain(input_data):
improvement_chain = create_improvement_chain()
recommendation_chain = create_diet_recommendation_chain()
summary_chain = create_summarize_chain()
evaluate_chain = create_evaluation_chain()

# 체인 실행 흐름 정의
multi_chain = (
Expand Down
134 changes: 88 additions & 46 deletions server/apis/food_image.py
Original file line number Diff line number Diff line change
@@ -1,9 +1,10 @@
import os
import base64
import redis
import aiofiles
import time
from datetime import datetime, timedelta
from openai import OpenAI
from openai import AsyncOpenAI
from pinecone.grpc import PineconeGRPC as Pinecone
from core.config import settings
from errors.business_exception import RateLimitExceeded, ImageAnalysisError, ImageProcessingError
Expand Down Expand Up @@ -32,11 +33,20 @@
# 요청 제한 설정
RATE_LIMIT = settings.RATE_LIMIT # 하루 최대 요청 가능 횟수

# 프롬프트 캐싱
CACHE_TTL = 3600

# 공용 로거
logger = get_logger()

# Chatgpt API 사용
client = OpenAI(api_key = settings.OPENAI_API_KEY)
# OpenAI API 사용
client = AsyncOpenAI(api_key = settings.OPENAI_API_KEY)

# Upstage API 사용
upstage = AsyncOpenAI(
api_key = settings.UPSTAGE_API_KEY,
base_url="https://api.upstage.ai/v1/solar"
)

# Pinecone 설정
pc = Pinecone(api_key=settings.PINECONE_API_KEY)
Expand Down Expand Up @@ -84,26 +94,48 @@ async def process_image_to_base64(file):


# prompt를 불러오기
def read_prompt(filename):
with open(filename, 'r', encoding='utf-8') as file:
prompt = file.read().strip()
return prompt
async def read_prompt(filename):
    """Return the prompt text stored in *filename*, using Redis as a cache.

    The file content is cached under ``prompt:<filename>`` with a 1-hour TTL
    (CACHE_TTL) so repeated requests skip disk I/O.

    Raises:
        FileAccessError: when the file is empty or cannot be read.
    """
    # BUG FIX: the cache key must interpolate the filename — a constant key
    # would make every prompt file share (and overwrite) one cache entry,
    # returning the wrong prompt after the first fill.
    cache_key = f"prompt:{filename}"

    # Serve from the Redis cache when a cached prompt exists
    cached_prompt = redis_client.get(cache_key)
    if cached_prompt:
        return cached_prompt

    try:
        async with aiofiles.open(filename, 'r', encoding='utf-8') as file:
            prompt = (await file.read()).strip()

        if not prompt:
            logger.error("프롬프트 파일 비어있음")
            raise FileAccessError()

        # Cache the prompt in Redis (TTL: 1 hour)
        redis_client.setex(cache_key, CACHE_TTL, prompt)
        logger.info(f"Redis 프롬프트 캐싱 완료: {filename}")

        return prompt

    except Exception as e:
        # NOTE(review): this also catches the FileAccessError raised above and
        # re-raises a fresh one after logging — preserved from the original flow.
        logger.error(f"프롬프트 파일 읽기 실패: {e}")
        raise FileAccessError()


# 음식 이미지 분석 API: prompt_type은 함수명과 동일
def food_image_analyze(image_base64: str):
async def food_image_analyze(image_base64: str):

# prompt 타입 설정
prompt_file = os.path.join(settings.PROMPT_PATH, "food_image_analyze.txt")
prompt = read_prompt(prompt_file)
prompt_file = os.path.join(settings.PROMPT_PATH, "image_detection.txt")
prompt = await read_prompt(prompt_file)

# prompt 내용 없을 경우
if not prompt:
logger.error("food_image_analyze.txt에 prompt 내용 미존재")
logger.error("image_detection.txt에 prompt 내용 미존재")
raise FileAccessError()

# OpenAI API 호출
response = client.chat.completions.create(
response = await client.chat.completions.create(
model="gpt-4o",
messages=[
{
Expand All @@ -112,7 +144,7 @@ def food_image_analyze(image_base64: str):
{
"type": "image_url",
"image_url": {
"url": f"data:image/jpeg;base64,{image_base64}"
"url": f"data:image/jpeg;base64,{image_base64}",
# 성능이 좋아지지만, token 소모 큼(tradeoff): 검증 필요
# "detail": "high"
}
Expand All @@ -126,6 +158,7 @@ def food_image_analyze(image_base64: str):
)

result = response.choices[0].message.content
# print(result)

# 음식명(반환값)이 존재하지 않을 경우
if not result:
Expand All @@ -136,50 +169,59 @@ def food_image_analyze(image_base64: str):
return result


# 제공받은 음식의 벡터 임베딩 값 변환 작업 수행
def get_embedding(text, model="text-embedding-3-small"):
text = text.replace("\n", " ")
embedding = client.embeddings.create(input=[text], model=model).data[0].embedding
return embedding
# Convert the given food name into an embedding vector (Upstage embedding model)
async def get_embedding(text, model="embedding-query"):
    """Return the embedding vector for *text* via the Upstage embeddings API."""
    try:
        cleaned = text.replace("\n", " ")
        result = await upstage.embeddings.create(input=[cleaned], model=model)
        return result.data[0].embedding
    except Exception as exc:
        logger.error(f"텍스트 임베딩 변환 실패: {exc}")
        raise ExternalAPIError()


# 벡터 임베딩을 통한 유사도 분석 진행(Pinecone)
def search_similar_food(query_name, top_k=3, score_threshold=0.7):

async def search_similar_food(query_name, top_k=3, score_threshold=0.7, candidate_multiplier=2):
try:
query_vector = get_embedding(query_name)
except Exception as e:
logger.error(f"OpenAI API 텍스트 임베딩 실패: {e}")
raise ExternalAPIError()
# 음식명 Embedding Vector 변환
query_vector = await get_embedding(query_name)

# Pinecone에서 유사도 검색
results = index.query(
vector=query_vector,
top_k=top_k * candidate_multiplier,
include_metadata=True
)

# 결과 처리 (점수 필터링 적용)
candidates = [
{
'food_pk': match['id'],
'food_name': match['metadata']['food_name'],
'score': match['score']
}
for match in results['matches'] if match['score'] >= score_threshold
]

# Pinecone에서 유사도 검색
results = index.query(
vector=query_vector,
# 결과값 갯수 설정
top_k=top_k,
# 메타데이터 포함 유무
include_metadata=True
)
# 유사도 점수를 기준으로 내림차순 정렬
sorted_candidates = sorted(candidates, key=lambda x: x["score"], reverse=True)

# 상위 top_k개 선택
final_results = sorted_candidates[:top_k]

# 결과 처리 (점수 필터링 적용)
similar_foods = [
{
'food_pk': match['id'],
'food_name': match['metadata']['food_name'],
'score': match['score']
}
for match in results['matches'] if match['score'] >= score_threshold
]
# null로 채워서 항상 top_k 크기로 반환
while len(final_results) < top_k:
final_results.append({'food_name': None, 'food_pk': None})

# null로 채워서 항상 top_k 크기로 반환
while len(similar_foods) < top_k:
similar_foods.append({'food_name': None, 'food_pk': None})
return final_results

return similar_foods[:top_k]
except Exception as e:
logger.error(f"유사도 검색 실패: {e}")
raise ExternalAPIError()


# Redis의 정의된 잔여 기능 횟수 확인
def get_remaining_requests(member_id: int):
async def get_remaining_requests(member_id: int):

try:
# Redis 키 생성
Expand Down
3 changes: 3 additions & 0 deletions server/core/config_dev.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,9 @@ class Settings:
# OpenAI
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

# Upstage
UPSTAGE_API_KEY = os.getenv("UPSTAGE_API_KEY")

# Data
DATA_PATH = os.getenv("DATA_PATH")
DOCKER_DATA_PATH = os.getenv("DOCKER_DATA_PATH")
Expand Down
3 changes: 3 additions & 0 deletions server/core/config_local.py
Original file line number Diff line number Diff line change
Expand Up @@ -22,6 +22,9 @@ class Settings:
# OpenAI
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

# Upstage
UPSTAGE_API_KEY = os.getenv("UPSTAGE_API_KEY")

# Data
DATA_PATH = os.getenv("DATA_PATH")
DOCKER_DATA_PATH = os.getenv("DOCKER_DATA_PATH")
Expand Down
3 changes: 3 additions & 0 deletions server/core/config_prod.py
Original file line number Diff line number Diff line change
Expand Up @@ -20,6 +20,9 @@ class Settings:
# OpenAI
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY")

# Upstage
UPSTAGE_API_KEY = os.getenv("UPSTAGE_API_KEY")

# Data
DATA_PATH = os.getenv("DATA_PATH")
PROMPT_PATH = os.getenv("PROMPT_PATH")
Expand Down
23 changes: 0 additions & 23 deletions server/models/food_analysis_model.py

This file was deleted.

Loading