forked from IndigoFloyd/SoybeanWebsite
-
Notifications
You must be signed in to change notification settings - Fork 0
/
website.py
470 lines (440 loc) · 18.6 KB
/
website.py
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
from flask import Flask, render_template, request, redirect, jsonify, send_file, session
import pandas as pd
from pymongo import MongoClient
import shutil
import datetime
import hashlib
import os
import predict_after
import redis
# construct Email text
from email.mime.text import MIMEText
from email.mime.multipart import MIMEMultipart
from email.header import Header
import smtplib
import json
import requests as re
import torch
# public variable, trait name
traitsList = ['ALL',
'MG',
'ST',
'FC',
'P_CLR',
'P_FRM',
'P_DENS',
'POD',
'SC_L',
'SC_CLR',
'H_CLR',
'protein',
'oil',
'Linoleic',
'Linolenic',
'Stem term',
'R1',
'R8',
'Hgt',
'Mot',
'Ldg',
'SQ',
'SdWgt',
'Yield',
'Oleic',
'Palmitic',
'PRR1',
'SCN3',
'Stearic']
# Start the app instance
def index():
    # Landing page for the SoyDNGP tool.
    return render_template('index.html')
app = Flask(__name__)
# Map /SoyDNGP to the landing page (registered manually instead of via decorator).
app.add_url_rule('/SoyDNGP', 'index', view_func=index)
# NOTE(review): secret key is hardcoded in source - move to environment/config.
app.secret_key = 'isadiashdiow12324'
# Create a redis instance for progress bar update
host = "127.0.0.1"
port = "6379"
# Shared connection pool; each request handler takes its own Redis client from it.
# decode_responses=True makes redis return str instead of bytes.
redis_pool = redis.ConnectionPool(host=host, port=port, decode_responses=True)
# Redirect homepage to /SoyDNGP
@app.route('/')
def redirect_to_index():
    """Best-effort: log the visitor's approximate location, then redirect to /SoyDNGP.

    The location lookup uses the Tencent Maps IP-location web service; any
    failure there (missing header, network error, bad payload) must never
    prevent the redirect itself.
    """
    # NOTE(review): API key and signing secret are hardcoded - move to config.
    key = 'GORBZ-Y27C5-R3GIZ-I6SFZ-BR7U6-3JB6A'
    sk = '87HIx3HWaB3mflZfWiXjIaVuiVsuI0r8'
    try:
        # .get avoids a KeyError (and a 500 on the homepage) when the app is
        # not behind a proxy that sets X-Forwarded-For.
        ip = request.headers.get('X-Forwarded-For', request.remote_addr)
        api = f"/ws/location/v1/ip/?ip={ip}&key={key}"
        # Tencent Maps signature: md5 over the request path plus the secret key.
        md5 = hashlib.md5(f"{api + sk}".encode('utf-8')).hexdigest()
        api_new = "https://apis.map.qq.com" + api + f"&sig={md5}"
        # json.loads replaces eval(): never evaluate an untrusted response body.
        result = json.loads(re.get(api_new).content.decode())
        # status == 0 means success for this API.
        if not int(result['status']):
            # Reuse the payload already fetched (the original issued a
            # second, identical HTTP request here).
            pos = result['result']['location']
            # Link to local MongoDB database
            # NOTE(review): "mongodb:///" looks like an incomplete URI - confirm.
            client = MongoClient("mongodb:///")
            # location database / location collection
            collection = client.location.location
            # Store the visitor coordinates for the Learn More page map.
            collection.insert_one(pos)
    except Exception:
        # Location logging is purely cosmetic; swallow errors and redirect.
        pass
    return redirect('/SoyDNGP')
# Contact page
@app.route('/contact')
def contact():
    """Serve the static contact/feedback page."""
    template_name = 'contact.html'
    return render_template(template_name)
@app.route('/SoyTSS')
# Learn More page (also served at /SoyTSS via the stacked route above)
@app.route('/LearnMore')
def LearnMore():
    """Render the Learn More page with a marker for every logged visitor location."""
    # Link to local MongoDB database
    # NOTE(review): "mongodb:///" looks like an incomplete URI - confirm.
    client = MongoClient("mongodb:///")
    # location database / location collection
    collection = client.location.location
    # Build the marker list the template expects: one dict per stored point,
    # with a 1-based id (enumerate replaces the original manual counter,
    # which also shadowed the `id` builtin).
    geo = [
        {"id": marker_id, "lat": f"{ret['lat']}", "lng": f"{ret['lng']}"}
        for marker_id, ret in enumerate(collection.find(), start=1)
    ]
    return render_template('learnmore.html', markers=geo)
# Search page
@app.route('/Search')
def Search():
    """Serve the trait lookup form."""
    page = 'lookup.html'
    return render_template(page)
# Upload page
@app.route('/UploadData')
def UploadData():
    """Serve the upload/predict form with an empty preview table."""
    empty_preview = pd.DataFrame()
    return render_template('predict.html', df=empty_preview)
# Download the test examples
@app.route('/DownloadExample')
def DownloadExample():
    # Serve the bundled example VCF so users can try the predictor.
    # NOTE(review): machine-specific absolute path - consider a path relative
    # to the application root so other deployments do not break here.
    return send_file('/home/wxt/Projects/SoybeanWebsite2/10_test_examples.vcf')
# Error pages: every handled status code renders the same template.
# Each handler now has a unique name; the original reused "page_not_found"
# and "internal_server_error", so the later defs shadowed the earlier ones
# at module level.
@app.errorhandler(400)
def handle_bad_request(error):
    return render_template('errors.html')
@app.errorhandler(404)
def handle_not_found(error):
    return render_template('errors.html')
@app.errorhandler(500)
def handle_server_error(error):
    return render_template('errors.html')
@app.errorhandler(504)
def handle_gateway_timeout(error):
    return render_template('errors.html')
@app.route('/submit', methods=['POST'])
def submit():
    """Handle the trait lookup form: query MongoDB by accession id or common name.

    Renders lookup.html with a DataFrame of one row per queried ID; traits
    absent from the database show the placeholder 'No result'.
    """
    # correspond to action /submit
    if request.method == 'POST':
        # Get the semicolon-separated IDs from the form
        ID = request.form['ID'].split(';')
        # Deduplicate while preserving the user's original order
        new_ID = list(set(ID))
        new_ID.sort(key=ID.index)
        # Trait option indices selected by the user
        traits = request.form.getlist('options')
        results = []
        # Proceed only when both an ID and at least one trait were supplied
        if new_ID and traits:
            # 'all' is a pseudo-option meaning "every trait": drop the flag,
            # the remaining indices already cover all traits. (Hoisted out of
            # the per-ID loop - it only ever fired on the first iteration.)
            if traits[0] == 'all':
                traits = traits[1:]
            # Resolve option indices to trait names once, outside the loop
            traitsNames = [traitsList[int(i)] for i in traits]
            # One MongoDB connection for the whole request (the original
            # opened a new client for every queried ID).
            # NOTE(review): "mongodb://" looks like an incomplete URI - confirm.
            client = MongoClient("mongodb://")
            # test database / test collection
            collection = client.test.test
            for id_ in new_ID:
                # Match on either the accession id or the common name
                rets = collection.find({"$or": [{'acid': id_}, {"CommonName": id_}]})
                # One row per queried ID; the ID cell is pre-styled HTML
                result = {"ID or Common Name": f"<span class='text-dark'><b>{id_}</b></span>"}
                for doc in rets:
                    for name in traitsNames:
                        # Missing traits default to a readable placeholder
                        result[name] = doc.get(name, 'No result')
                # Appended even when the query matched nothing, so the user
                # still sees a row for the ID they asked about.
                results.append(result)
            # The result can be displayed because it is not empty
            showresult = True
        else:
            # Either the ID list or the trait list was empty: nothing to show
            results = None
            showresult = False
        resultDF = pd.DataFrame(results)
        return render_template('/lookup.html', showresult=showresult, results=resultDF, col_names=resultDF.columns, ID=ID)
@app.route('/upload', methods=['POST'])
def upload():
    """Receive an uploaded genotype file, dedupe it by MD5, and record task state.

    Creates a fresh task id, stores the file under a date+md5 directory, and
    writes the task record to redis under the "task_session" key.
    Returns a JSON status: errno 0 on success, errno 1 for an empty file.
    """
    # Take a redis client from the pool; each upload is a new task
    r = redis.Redis(connection_pool=redis_pool)
    # Random task id, kept in the Flask session so later requests find their state
    taskID = hashlib.md5(os.urandom(20)).hexdigest()
    session['taskID'] = taskID
    # Skeleton task record; filled in below and by /getArgs and /Predict
    task_session_ = {"taskID": taskID, "md5": "", "join": "", "traits": "", "filePath": "", "fileName": "",
                     "predict_finish": False}
    # Get user uploaded file from the multipart form
    file = request.files.get('file')
    # Read the whole upload into memory as bytes
    content = file.read()
    # if content is not empty
    if content:
        # MD5 of the file contents, used for de-duplication
        file_md5 = hashlib.md5(content).hexdigest()
        task_session_['md5'] = file_md5
        # Current timestamp split into [date, time]
        date = str(datetime.datetime.now()).split(' ')
        # Directory name = date + time + md5,
        # e.g. "2023-06-06-16.41.05.379780-a8d2a9a59092c18c35f688be915a5bb6"
        newfilename = date[0] + '-' + date[1].replace(':', '.') + '-' + file_md5
        # Create the per-upload working directory if needed
        if not os.path.exists(f"./{newfilename}"):
            os.mkdir(f"./{newfilename}")
        # Full path the uploaded file will live at
        savepath = f"./{newfilename}/" + file.filename
        task_session_['fileName'] = file.filename
        task_session_['filePath'] = f"./{newfilename}/"
        # Persist the task record.
        # NOTE(review): this SETs the whole "task_session" key to a dict
        # containing only this task, so concurrent tasks overwrite each other
        # - confirm whether multi-user use is expected.
        r.set("task_session", json.dumps({taskID: task_session_}))
        # link database
        # NOTE(review): hardcoded credentials in source - move to configuration.
        client = MongoClient("mongodb://xtlab:S2o0y2D3N0G6P@localhost:27017/")
        # files database
        db = client.files
        # Has this exact file (by md5) been uploaded before?
        result = db.files.find_one({'md5': file_md5})
        if not result:
            # First upload: write the file to disk and register its path/md5
            with open(savepath, 'wb') as f:
                f.write(content)
            db.files.insert_one({'path':savepath, 'md5': file_md5})
        else:
            # Seen before: copy the previously stored file into this task's dir
            shutil.copyfile(result['path'], savepath)
        return jsonify({'errno':0, 'errmsg':'success'})
    else:
        return jsonify({'errno':1, 'errmsg':'file is empty'})
@app.route('/getArgs', methods=['POST'])
def getArgs():
    """Store the user's prediction options (trait list + consent) in the task record."""
    if request.method == "POST":
        payload = request.json
        # Whether the user agreed to add their results to the public database
        consent = payload.get("join")
        # Selected trait option indices from the form
        selected = payload.get('options')
        # Update the task record in redis
        conn = redis.Redis(connection_pool=redis_pool)
        task_key = session['taskID']
        record = json.loads(conn.get("task_session"))[task_key]
        record['join'] = consent
        record['traits'] = selected
        conn.set("task_session", json.dumps({task_key: record}))
        return 'success'
@app.route('/Predict', methods=['GET', 'POST'])
def JoinOrNot():
    """Run the prediction for the current task, optionally store results in the
    public database, then render page 1 of the result table.

    Reads task state from redis (written by /upload and /getArgs), runs
    predict_after.predict, marks the task finished, and paginates the result
    DataFrame at 3 rows per page.
    """
    # Free any cached GPU memory before starting a new prediction
    torch.cuda.empty_cache()
    if request.method == 'POST':
        # Timestamp used to annotate database entries below
        date = str(datetime.datetime.now()).split(' ')
        # Get a redis client from the pool
        r = redis.Redis(connection_pool=redis_pool)
        # Retrieve this task's state
        task_session_ = json.loads(r.get("task_session"))[session['taskID']]
        # worker stays None when no traits were selected.
        # NOTE(review): worker.IsMissing below would then raise AttributeError
        # when join == 'yes' - confirm traits can never be empty here.
        worker = None
        if len(task_session_['traits']) != 0:
            # Determine whether "select all" was clicked and build traitsNames
            if task_session_['traits'][-1] != 'all':
                print(task_session_['filePath'] + task_session_['fileName'])
                traitsNames = [traitsList[int(i)] for i in task_session_['traits']]
                # start forecasting
                worker = predict_after.predict(task_session_['filePath'] + task_session_['fileName'], traitsNames,
                                               task_session_['filePath'], r, taskID=session['taskID'],
                                               if_all=False)
            else:
                # Drop the trailing 'all' flag; remaining indices cover the traits
                traits = task_session_['traits'][:-1]
                traitsNames = [traitsList[int(i)] for i in traits]
                # NOTE(review): if_all=False even in the "select all" branch -
                # confirm whether this should be if_all=True.
                worker = predict_after.predict(task_session_['filePath'] + task_session_['fileName'], traitsNames,
                                               task_session_['filePath'], r, taskID=session['taskID'],
                                               if_all=False)
        torch.cuda.empty_cache()
        # Mark the prediction finished in both task state and progress state
        progressdict = json.loads(r.get('progressdict'))[session['taskID']]
        task_session_['predict_finish'] = True
        r.set("task_session", json.dumps({session['taskID']: task_session_}))
        progressdict['predict_finish'] = True
        r.set('progressdict', json.dumps({session['taskID']: progressdict}))
        # fetch taskdict
        taskdict = json.loads(r.get('taskdict'))[session['taskID']]
        # A DataFrame cannot be stored in redis directly, so results travel as
        # JSON and are parsed back here before use.
        resultJSON = taskdict['result']
        resultDF = pd.read_json(resultJSON, encoding="utf-8", orient='records')
        # If the user agreed to add their results to the public database
        if task_session_['join'] == 'yes':
            # NOTE(review): hardcoded credentials in source - move to configuration.
            client = MongoClient("mongodb://xtlab:S2o0y2D3N0G6P@localhost:27017/")
            db = client.test
            collection = db.test
            resultList = []
            for i in range(len(resultDF)):
                # read each row of data
                row = resultDF.iloc[i, :]
                # accession id for this sample
                seedID = row['acid']
                # Look up an existing CommonName for this accession, if any
                rets = collection.find({'acid': seedID})
                CommonName = ""
                for ret in rets:
                    CommonName = ret.get('CommonName', "")
                # Build the document to insert; include CommonName only if found
                if len(CommonName):
                    seedDict = {"acid": seedID, "CommonName": CommonName}
                else:
                    seedDict = {"acid": seedID}
                # Add trait content (col_names[0] is the id column)
                for name in taskdict['col_names'][1:]:
                    trait = name
                    # Predictions from files with missing genotypes get a ** marker
                    if worker.IsMissing:
                        value = f"**{row[trait]}**(predict, uploaded at {date[0] + '-' + date[1].replace(':', '.')})"
                    else:
                        value = f"{row[trait]}(predict, uploaded at {date[0] + '-' + date[1].replace(':', '.')})"
                    # assembled dictionary
                    seedDict[trait] = value
                # add sample
                resultList.append(seedDict)
            collection.insert_many(resultList)
        elif task_session_['join'] == 'no':
            pass
        # Style the ID column as bold dark text for the result table
        temp = resultDF['acid']
        resultDF['acid'] = temp.map(lambda x: f"<span class='text-dark'><b>{x}</b></span>")
        # Pagination: 3 result rows per page
        rows_per_page = 3
        # Total page count (integer division, +1 for any remainder/partial page)
        taskdict['total_pages'] = len(resultDF) // rows_per_page + 1
        # Start on page 1
        taskdict['page'] = 1
        # First row index shown on the current page
        start_row = (taskdict['page'] - 1) * rows_per_page
        # One-past-last row index shown on the current page
        end_row = start_row + rows_per_page
        # Slice the first page out of the full result frame
        df_slice = resultDF.iloc[start_row:end_row]
        # Column names for the rendered table header
        taskdict['col_names'] = resultDF.columns.tolist()
        # Persist pagination state for /pagenext and /pageprev
        r.set('taskdict', json.dumps({session['taskID']: taskdict}))
        return render_template('result.html', df=df_slice, total_pages=taskdict['total_pages'],
                               page=taskdict['page'],
                               predict_finish=task_session_['predict_finish'],
                               col_names=taskdict['col_names'])
@app.route("/progress")
def update_progress():
    """Return the current task's progress dict for the front-end progress bar."""
    conn = redis.Redis(connection_pool=redis_pool)
    all_progress = json.loads(conn.get('progressdict'))
    return all_progress[session['taskID']]
@app.route('/pagenext')
def pagenext():
    """Advance the result table to the next page and re-render it."""
    # Fetch a redis client from the pool
    r = redis.Redis(connection_pool=redis_pool)
    # Retrieve pagination state and task state for this task
    taskdict = json.loads(r.get('taskdict'))[session['taskID']]
    task_session_ = json.loads(r.get('task_session'))[session['taskID']]
    # Clamp at the last page: the original incremented unconditionally, so
    # repeated clicks could run past the end and render an empty table.
    taskdict['page'] = min(taskdict['page'] + 1, taskdict['total_pages'])
    # Persist the new page number
    r.set('taskdict', json.dumps({session['taskID']: taskdict}))
    rows_per_page = 3
    # Results travel through redis as JSON; parse back into a DataFrame
    resultDF = pd.read_json(taskdict['result'], encoding="utf-8", orient='records')
    # Style the ID column as bold dark text for the template
    resultDF['acid'] = resultDF['acid'].map(lambda x: f"<span class='text-dark'><b>{x}</b></span>")
    # Slice out the rows for the (1-based) current page
    start_row = (taskdict['page'] - 1) * rows_per_page
    df_slice = resultDF.iloc[start_row:start_row + rows_per_page]
    return render_template('result.html', df=df_slice, total_pages=taskdict['total_pages'],
                           page=taskdict['page'],
                           predict_finish=task_session_['predict_finish'], col_names=taskdict['col_names'])
@app.route('/pageprev')
def pageprev():
    """Move the result table back one page and re-render it."""
    # Fetch a redis client from the pool
    r = redis.Redis(connection_pool=redis_pool)
    # Retrieve pagination state and task state for this task
    taskdict = json.loads(r.get('taskdict'))[session['taskID']]
    task_session_ = json.loads(r.get('task_session'))[session['taskID']]
    # Clamp at page 1: the original decremented unconditionally, so repeated
    # clicks could drive the page to 0 or negative and misalign the slice.
    taskdict['page'] = max(taskdict['page'] - 1, 1)
    # Persist the new page number
    r.set('taskdict', json.dumps({session['taskID']: taskdict}))
    rows_per_page = 3
    # Results travel through redis as JSON; parse back into a DataFrame
    resultDF = pd.read_json(taskdict['result'], encoding="utf-8", orient='records')
    # Style the ID column as bold dark text for the template
    resultDF['acid'] = resultDF['acid'].map(lambda x: f"<span class='text-dark'><b>{x}</b></span>")
    # Slice out the rows for the (1-based) current page
    start_row = (taskdict['page'] - 1) * rows_per_page
    df_slice = resultDF.iloc[start_row:start_row + rows_per_page, :]
    return render_template('result.html', df=df_slice, total_pages=taskdict['total_pages'],
                           page=taskdict['page'],
                           predict_finish=task_session_['predict_finish'], col_names=taskdict['col_names'])
@app.route('/download')
def download_file():
    """Send the prediction CSV produced for the current task."""
    conn = redis.Redis(connection_pool=redis_pool)
    # The task record holds the working directory the predictor wrote into
    state = json.loads(conn.get('task_session'))[session['taskID']]
    csv_path = f'{state["filePath"]}/predict.csv'
    return send_file(csv_path)
# NOTE(review): these registrations duplicate the error handlers defined
# earlier in the file; kept for compatibility, but each function now has a
# unique name (the original reused "page_not_found"/"internal_server_error",
# shadowing other module-level defs).
@app.errorhandler(400)
def handle_bad_request_late(error):
    return render_template('errors.html')
@app.errorhandler(404)
def handle_not_found_late(error):
    return render_template('errors.html')
@app.errorhandler(500)
def handle_server_error_late(error):
    return render_template('errors.html')
@app.route('/send', methods=['POST'])
def send():
    """Email the feedback form contents to the maintainers, then thank the user."""
    if request.method == 'POST':
        userName = request.form.getlist('Name')
        userEmail = request.form.getlist('Email')
        userContent = request.form.getlist('field-2')
        mail_sender = '[email protected]'
        mail_host = 'smtp.qq.com'
        # NOTE(review): empty SMTP auth code - login will fail until configured.
        mail_license = ''
        mail_receivers = ['[email protected]', '[email protected]']
        mail = MIMEMultipart('related')
        mail['From'] = f"Website User<{mail_sender}>"  # sender
        mail['To'] = "zhn<[email protected]>, wxt<[email protected]>"  # receivers
        mail['Subject'] = Header('网页问题反馈', 'utf-8')  # subject
        # Plain-text body: user name, feedback text, reply-to address
        message = MIMEText(f'用户:{userName[0]}\n反馈:{userContent[0]}\n请回复至:{userEmail[0]}', 'plain', 'utf-8')
        mail.attach(message)
        # Port 587 is the STARTTLS submission port: upgrade the connection
        # before authenticating. The original called login() on a plaintext
        # connection, sending credentials in the clear.
        stp = smtplib.SMTP(mail_host, 587)
        try:
            stp.starttls()
            stp.login(mail_sender, mail_license)
            # args: sender address, recipient list, message as str
            stp.sendmail(mail_sender, mail_receivers, mail.as_string())
        finally:
            # Always close the SMTP connection, even if sending fails
            stp.quit()
        return render_template('thanksforcontact.html')
if __name__ == "__main__":
    # Development entry point; listen on all interfaces on port 8080.
    # threaded=False serializes request handling - presumably to avoid
    # concurrent GPU use in predict_after; confirm before enabling threading.
    app.run(host="0.0.0.0", port=8080, threaded=False)