
Commit

Merge pull request #190 from WadeFade/184-custom-link-to-get-teams-link
184 custom link to get teams link
WadeFade authored Dec 6, 2023
2 parents 7c0c399 + 15da08f, commit 77a213a
Showing 5 changed files with 99 additions and 87 deletions.
.bruno/GET EDT.bru (3 changes: 2 additions & 1 deletion)
@@ -5,12 +5,13 @@ meta {
}

get {
-  url: http://127.0.0.1:8000/v1/month?firstname=mathis&lastname=gauthier
+  url: http://127.0.0.1:8000/v1/month?firstname=mathis&lastname=gauthier&format=ical
  body: none
  auth: none
}

query {
  firstname: mathis
  lastname: gauthier
+  format: ical
}
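
For orientation: the new format=ical query parameter asks the /v1/month endpoint for an iCalendar export instead of the default JSON. A minimal Python sketch of the same request, assuming the API runs locally on port 8000 as in this Bruno file:

```python
import requests

# Mirrors the Bruno request above; names and host are the example values from the diff.
response = requests.get(
    "http://127.0.0.1:8000/v1/month",
    params={"firstname": "mathis", "lastname": "gauthier", "format": "ical"},
)
response.raise_for_status()

# With format=ical the endpoint streams a text/calendar attachment.
with open("edt.ics", "wb") as f:  # local filename is arbitrary
    f.write(response.content)
```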
.bruno/get teams link.bru (17 changes: 17 additions & 0 deletions)
@@ -0,0 +1,17 @@
+meta {
+  name: get teams link
+  type: http
+  seq: 2
+}
+
+get {
+  url: http://127.0.0.1:8000/v1/teams?firstname=mathis&lastname=gauthier&date_time=2023-12-06T08:00
+  body: none
+  auth: none
+}
+
+query {
+  firstname: mathis
+  lastname: gauthier
+  date_time: 2023-12-06T08:00
+}
main.py (9 changes: 8 additions & 1 deletion)
@@ -2,7 +2,8 @@
from datetime import datetime
from fastapi import FastAPI
from fastapi.responses import StreamingResponse
-from request import get_current
+
+from request import get_current, get_teams_link

app = FastAPI()

@@ -28,3 +29,9 @@ async def get_edt_month(firstname: str, lastname: str, format: str = None):

    headers = {"Content-Disposition": f"attachment; filename={file_name}"}
    return StreamingResponse(iter([file.getvalue()]), media_type="text/calendar", headers=headers)
+
+
+@app.get("/v1/teams")
+async def get_edt_teams(firstname: str, lastname: str, date_time: str):
+    result = await get_teams_link(firstname, lastname, date_time)
+    return result
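
The new route delegates to get_teams_link in request.py (diff below) and returns the scraped Teams URL. A hedged usage sketch against a locally running instance; the parameter values are the examples from the Bruno file above:

```python
import requests

# date_time must be "YYYY-MM-DDTHH:MM" and match a course's start time.
response = requests.get(
    "http://127.0.0.1:8000/v1/teams",
    params={
        "firstname": "mathis",
        "lastname": "gauthier",
        "date_time": "2023-12-06T08:00",
    },
)
print(response.json())  # the Teams link, or '' when no course matches
```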
request.py (156 changes: 71 additions & 85 deletions)
@@ -1,12 +1,19 @@
-from datetime import datetime
+from datetime import datetime, date
from datetime import timedelta

import pytz
import requests
from bs4 import BeautifulSoup
from icalendar import Calendar, Event, vCalAddress, vText
from dateutils import get_month
from cachetools import cached, TTLCache
from requests_toolbelt.threaded import pool
+from dotenv import load_dotenv
+import os
+
+load_dotenv()
+
+base_url_server = os.environ.get('BASE_URL_SERVER')

numberOfWeekByMonth = 8

@@ -36,7 +43,7 @@ async def get_current(firstname, lastname, format):
    requests_response.sort(key=lambda x: x.request_kwargs['url'])

    for response in requests_response:
-        result.append(parse_html_per_week(response))
+        result.append(parse_html_per_week(response, firstname, lastname))

    if format is None:
        return result
@@ -45,8 +52,30 @@
    return ical.to_ical().decode('utf-8')


+async def get_teams_link(firstname, lastname, date_time):
+    parsed_date = date_time.split('T')[0].split('-')
+    date_cours = date(int(parsed_date[0]), int(parsed_date[1]), int(parsed_date[2])).strftime("%Y-%m-%d")
+
+    calendar_url_base_url = 'https://edtmobiliteng.wigorservices.net//WebPsDyn.aspx?action=posEDTBEECOME&serverid=i'
+    calendar_url_to_scrap = f"{calendar_url_base_url}&Tel={firstname}.{lastname}&date={date_cours}"
+
+    response = requests.get(calendar_url_to_scrap)
+
+    if response.status_code != 200:
+        raise Exception('An error has occurred whilst trying to scrape the agenda')
+    if 'Erreur de parametres' in response.text:
+        print({'status_code': response.status_code,
+               'response': response.text,
+               'url': calendar_url_to_scrap
+               })
+        raise Exception('E_SCRAPPING_PARAMETERS')
+
+    result = scrap_teams_link(response, date_time)
+    return result
+
+
@cached(cache=TTLCache(maxsize=1024, ttl=10800))
-def parse_html_per_week(week_data):
+def parse_html_per_week(week_data, firstname, lastname):
    result = {}
    key = 'week'
    result[key] = {}
@@ -98,9 +127,9 @@ def parse_html_per_week(week_data):
            else:
                presence = False

-            link = ''
-            if el.select('.Teams a'):
-                link = el.select('.Teams a')[0].get('href')
+            formated_date = (datetime(int(year), int(day_month), int(day_date)) + timedelta(days=7)).strftime(
+                "%Y-%m-%d")
+            link = f"{base_url_server}/v1/teams?firstname={firstname}&lastname={lastname}&date_time={formated_date}T{start}"

            data = {'date': new_date, 'subject': subject, 'start': start, 'end': end, 'professor': professor,
                    'room': room,
@@ -115,84 +144,41 @@
    return result


-# @cached(cache=TTLCache(maxsize=1024, ttl=10800))
-# def scrap_week(firstname, lastname, queried_date):
-# response = requests.get(calendar_url_to_scrap)
-# if response.status_code != 200:
-# raise Exception('An error has occurred whilst trying to scrape the agenda')
-# if 'Erreur de parametres' in response.text:
-# print({'status_code': response.status_code,
-# 'response': response.text,
-# 'url': calendar_url_to_scrap
-# })
-# raise Exception('E_SCRAPPING_PARAMETERS')
-#
-# result = {}
-# key = 'week'
-# result[key] = {}
-#
-# soup = BeautifulSoup(response.text, 'html.parser')
-#
-# days = soup.find_all('div', {'class': 'Jour'})
-#
-# for day, el1 in enumerate(days):
-# theDay = day
-# courses = soup.find_all('div', {'class': 'Case'})
-# leftCss = int(float(el1['style'].split('left:')[1].split(';')[0].replace('%', '')) + 100)
-# for course, el in enumerate(courses):
-# if (int(float(el['style'].split('left:')[1].split(';')[0].replace('%', ''))) != int(
-# float(leftCss) + 9)) and (
-# int(float(el['style'].split('left:')[1].split(';')[0].replace('%', ''))) != leftCss or not
-# soup.select('.TCJour')[course]):
-# continue
-#
-# day = soup.select('.TCJour')[theDay].text.split(' ')
-# # date
-# day_date = day[1]
-# day_month = get_month(day[2])
-# weekday = day[0].lower()
-# year = queried_date.split('-')[0]
-# new_date = (datetime(int(year), int(day_month), int(day_date)) + timedelta(days=7)).strftime("%d/%m/%Y")
-# # time
-# start = el.select('.TChdeb')[0].text[:5]
-# end = el.select('.TChdeb')[0].text[8:13]
-#
-# professor = el.select('.TCProf')[0].prettify().split('</span>')[1].split('<br/>')[0]
-#
-# subject = el.select('.TCase')[0].text.strip()
-# if professor.strip() != '':
-# subject = subject.split(professor.strip())[0].strip()
-# else:
-# professor = 'N/A'
-# subject = subject.split('INGENIERIE')[0].strip()
-#
-# bts = 'BTS' in professor
-# professor = professor.replace('BTS', '').strip()
-# room = el.select('.TCSalle')[0].text.replace('Salle:', '').strip()
-# remote = 'distanciel' in subject.lower() or 'distanciel' in room.lower()
-#
-# # presence
-# presence = el.select('.Presence img')
-# if presence and presence[0]['src'] == '/img/valide.png' or not presence:
-# presence = True
-# else:
-# presence = False
-#
-# link = ''
-# if el.select('.Teams a'):
-# link = el.select('.Teams a')[0].get('href')
-#
-# data = {'date': new_date, 'subject': subject, 'start': start, 'end': end, 'professor': professor,
-# 'room': room,
-# 'weekday': weekday, 'bts': bts, 'remote': remote, 'link': link, 'presence': presence}
-#
-# if weekday in result[key]:
-# result[key][weekday].append(data)
-# else:
-# result[key][weekday] = [data]
-#
-# result = regroup_courses(result)
-# return result
+def scrap_teams_link(data, date_time) -> str:
+    soup = BeautifulSoup(data.text, 'html.parser')
+    days = soup.find_all('div', {'class': 'Jour'})
+
+    parsed_date = date_time.split('T')[0].split('-')
+    parsed_time = date_time.split('T')[1]
+    date_cours = date(int(parsed_date[0]), int(parsed_date[1]), int(parsed_date[2])).strftime("%d/%m/%Y")
+
+    result_link = ''
+
+    for day, el1 in enumerate(days):
+        theDay = day
+        courses = soup.find_all('div', {'class': 'Case'})
+        leftCss = int(float(el1['style'].split('left:')[1].split(';')[0].replace('%', '')) + 100)
+        for course, el in enumerate(courses):
+            if (int(float(el['style'].split('left:')[1].split(';')[0].replace('%', ''))) != int(
+                    float(leftCss) + 9)) and (
+                    int(float(el['style'].split('left:')[1].split(';')[0].replace('%', ''))) != leftCss or not
+                    soup.select('.TCJour')[course]):
+                continue
+
+            day = soup.select('.TCJour')[theDay].text.split(' ')
+            # date
+            day_date = day[1]
+            day_month = get_month(day[2])
+
+            year = date_time.split('T')[0].split('-')[0]
+            new_date = (datetime(int(year), int(day_month), int(day_date)) + timedelta(days=7)).strftime("%d/%m/%Y")
+            # time
+            start = el.select('.TChdeb')[0].text[:5]
+
+            if el.select('.Teams a') and new_date == date_cours and start == parsed_time:
+                result_link = el.select('.Teams a')[0].get('href')
+
+    return result_link


def regroup_courses(result):
@@ -242,7 +228,7 @@ def generate_ical(result) -> Calendar:
            event.add('name', course['subject'])
            event.add('summary', course['subject'])
            event.add('description',
-                      f"Distanciel: {course['remote']} \nSalle: {course['room']} \nCours de: {course['start']} à {course['end']} \nProfesseur: {course['professor']} \n(Importé le: {date_export})")
+                      f"Distanciel: {course['remote']} \nSalle: {course['room']} \nCours de: {course['start']} à {course['end']} \nProfesseur: {course['professor']} \nLien EDT: {course['link']} \n(Importé le: {date_export})")

            start_date = datetime.strptime(course['date'] + ' ' + course['start'], '%d/%m/%Y %H:%M')
            end_date = datetime.strptime(course['date'] + ' ' + course['end'], '%d/%m/%Y %H:%M')
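
Two notes on the request.py changes. First, parse_html_per_week no longer embeds the scraped Teams href directly; it now builds a link to this API's own /v1/teams route from BASE_URL_SERVER, so the real Teams URL is resolved on demand when the calendar entry is opened. Second, the manual split('T')/split('-') parsing of date_time in get_teams_link and scrap_teams_link is equivalent to standard-library ISO parsing; a sketch, using the example timestamp from the Bruno file:

```python
from datetime import datetime

dt = datetime.fromisoformat("2023-12-06T08:00")  # example input

dt.strftime("%Y-%m-%d")  # "2023-12-06": date_cours in get_teams_link
dt.strftime("%d/%m/%Y")  # "06/12/2023": date_cours in scrap_teams_link
dt.strftime("%H:%M")     # "08:00": parsed_time, compared against a course's start
```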
requirements.txt (1 change: 1 addition & 0 deletions)
@@ -13,6 +13,7 @@ idna==3.6
pydantic==2.5.2
pydantic_core==2.14.5
python-dateutil==2.8.2
+python-dotenv==1.0.0
pytz==2023.3
requests==2.31.0
requests-toolbelt==1.0.0
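
python-dotenv backs the new load_dotenv() call in request.py, which reads BASE_URL_SERVER from a .env file. A hypothetical entry (the value is an assumption; in practice it should be the public base URL of this API, since it is prefixed onto the generated /v1/teams links):

```
BASE_URL_SERVER=http://127.0.0.1:8000
```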
