#!/usr/bin/env python
"""
Cron script to update OpenDataset Scene Catalog in
http://sentinel1-slc-seasia-pds.s3-website-ap-southeast-1.amazonaws.com/datasets/slc/v1.1/catalog.csv
"""
from __future__ import print_function
from future import standard_library
standard_library.install_aliases()
import dateutil.parser
from datetime import datetime, timedelta
import argparse
import csv
import os
import boto3
import json
import urllib.parse
import botocore

S3_BUCKET = "sentinel1-slc-seasia-pds"
S3_PREFIX = "datasets/slc/v1.1/"
OPENDATSET_URL = "http://{}.s3-website-ap-southeast-1.amazonaws.com/".format(S3_BUCKET)
CATALOG_FILE = "catalog.csv"


def date_subdirs(start_time, end_time):
    """Return YYYY/MM/DD/ date prefixes from start_time through one day past end_time."""
    date = start_time
    datelist = [start_time.strftime("%Y/%m/%d/")]
    while date <= end_time:
        date += timedelta(days=1)
        datelist.append(date.strftime("%Y/%m/%d/"))
    return datelist


def get_matching_s3_objects(client, bucket, prefix='', suffix=''):
    """Yield objects under an S3 prefix whose keys end with suffix, per
    https://alexwlchan.net/2018/01/listing-s3-keys-redux/."""
    kwargs = {'Bucket': bucket}
    if isinstance(prefix, (str,)):
        kwargs['Prefix'] = prefix
    while True:
        resp = client.list_objects_v2(**kwargs)
        try:
            contents = resp['Contents']
        except KeyError:
            # empty listing: nothing under this prefix
            return
        for obj in contents:
            key = obj['Key']
            if key.startswith(prefix) and key.endswith(suffix):
                print("Found matching key %s" % key)
                yield obj
        try:
            # keep paginating while the listing is truncated
            kwargs['ContinuationToken'] = resp['NextContinuationToken']
        except KeyError:
            break


def read_catalog_file(resource, local_file):
    """Download the current catalog and return the scene ids it already lists."""
    key = "{}{}".format(S3_PREFIX, CATALOG_FILE)
    try:
        resource.Bucket(S3_BUCKET).download_file(key, local_file)
        print("Downloaded {} from {}:{}".format(local_file, S3_BUCKET, key))
    except botocore.exceptions.ClientError as e:
        if e.response['Error']['Code'] == "404":
            # no catalog in the bucket yet; start from an empty scene list
            print("The catalog object does not exist yet.")
            return []
        else:
            raise
    with open(local_file, 'r') as rf:
        reader = csv.reader(rf, delimiter=',')
        ids = []
        for row in reader:
            ids.append(row[0])
    return ids


def upload_catalog_file(resource, local_file):
    """Upload the updated local catalog back to the S3 bucket."""
    key = "{}{}".format(S3_PREFIX, CATALOG_FILE)
    with open(local_file, 'rb') as data:
        resource.Bucket(S3_BUCKET).put_object(Key=key, Body=data)
        print("Uploaded {} to {}:{}".format(local_file, S3_BUCKET, key))


def gather_scenes(start_time, end_time):
    session = boto3.session.Session(profile_name='opendataset')
    s3_client = session.client('s3')
    s3_resource = session.resource('s3')
    datelist = date_subdirs(start_time, end_time)
    local_file = os.path.join(os.path.expanduser('~'), CATALOG_FILE)
    scenes_in_file = read_catalog_file(s3_resource, local_file)
    scenes = []
    for day in datelist:
        # scrub all folders to find zip and metadata for listing
        prefix = os.path.join(S3_PREFIX, day)
        print(prefix)
        obj_list = get_matching_s3_objects(s3_client, S3_BUCKET, prefix=prefix, suffix="met.json")
        for obj in obj_list:
            content_obj = s3_resource.Object(S3_BUCKET, obj['Key'])
            metadata = json.loads(content_obj.get()['Body'].read().decode('utf-8'))
            this_id = os.path.splitext(metadata['archive_filename'])[0]
            if this_id not in scenes_in_file:
                metadata["id"] = this_id
                # build the public download URL by replacing the met.json filename
                # in the object key with the archive filename
                metadata["download_url"] = urllib.parse.urljoin(urllib.parse.urljoin(OPENDATSET_URL, obj['Key']),
                                                                "./{}".format(metadata['archive_filename']))
                latitudes = [xy[0] for xy in metadata['bbox']]
                longitudes = [xy[1] for xy in metadata['bbox']]
                metadata["minLat"] = min(latitudes)
                metadata["maxLat"] = max(latitudes)
                metadata["minLon"] = min(longitudes)
                metadata["maxLon"] = max(longitudes)
                print("%s not found in current scene list, appending." % this_id)
                scenes.append(metadata)
            else:
                print("%s found in current scene list. Skipping..." % this_id)
    print("Adding %s new scenes to the catalog. Runtime: %s" % (len(scenes), datetime.utcnow().isoformat()))
    with open(local_file, 'a') as fd:
        writer = csv.writer(fd)
        for metadata in scenes:
            # id,platform,orbitNumber,orbitRepeat,trackNumber,direction,
            # sensingStart,sensingStop,minLat,minLon,maxLat,maxLon,download_url
            row = [metadata['id'],
                   metadata['platform'],
                   metadata['orbitNumber'],
                   metadata['orbitRepeat'],
                   metadata['trackNumber'],
                   metadata['direction'],
                   metadata['sensingStart'],
                   metadata['sensingStop'],
                   metadata['minLat'],
                   metadata['minLon'],
                   metadata['maxLat'],
                   metadata['maxLon'],
                   metadata['download_url']]
            writer.writerow(row)
    upload_catalog_file(s3_resource, local_file)


if __name__ == "__main__":
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("starttime", help="Start time in ISO8601 format", nargs='?',
                        default="%sZ" % (datetime.utcnow() - timedelta(days=5)).isoformat())
    parser.add_argument("endtime", help="End time in ISO8601 format", nargs='?',
                        default="%sZ" % datetime.utcnow().isoformat())
    args = parser.parse_args()
    gather_scenes(dateutil.parser.parse(args.starttime), dateutil.parser.parse(args.endtime))
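
# Illustrative cron entry (an assumption, not part of the original source):
# run the update once a day at 01:00 UTC and append output to a log file.
# The interpreter and paths below are placeholders.
#
#   0 1 * * * python /path/to/cron-updatelist.py >> /path/to/updatelist.log 2>&1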