# app.rb
require 'sinatra'
require 'sinatra/content_for'
require 'net/https'
require 'uri'
require 'json'
require 'fileutils'
require './lib/pingometer.rb'
require './lib/pagesnap.rb'
require './lib/browserstack.rb'
require 'aws-sdk'
require 'httparty'
require 'mongoid'
require 'qu-mongoid'
require './models/monitor_event.rb'
require './models/snapshot.rb'
require './models/incident.rb'
require './jobs/load_pingometer_events.rb'
require './jobs/snapshot_monitor.rb'
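# Configuration comes from environment variables (Pingometer and AWS credentials, Mongo URI, etc.).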
PINGOMETER_USER = ENV['PINGOMETER_USER']
PINGOMETER_PASS = ENV['PINGOMETER_PASS']
AWS_KEY = ENV['AWS_KEY']
AWS_SECRET = ENV['AWS_SECRET']
AWS_BUCKET = ENV['AWS_BUCKET']
AWS_REGION = ENV['AWS_REGION']
PRODUCTION = ENV['RACK_ENV'] == 'production'
MONGO_URI = ENV['MONGO_URI'] || ENV['MONGOLAB_URI'] || "mongodb://localhost:27017/snap_it_up"
PAGESNAP_URL = ENV['PAGESNAP_URL']
BROWSERSTACK_USER = ENV['BROWSERSTACK_USER']
BROWSERSTACK_KEY = ENV['BROWSERSTACK_KEY']
USE_WEBHOOK = (ENV['USE_WEBHOOK'] || '').downcase == 'true'
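# Point Mongoid at the MongoDB instance given by MONGO_URI.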
configure do
Mongoid.configure do |config|
config.sessions = {
:default => {
:uri => MONGO_URI
}
}
end
end
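# Default AWS credentials and region, used when uploading snapshots to S3.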
Aws.config.merge!({
credentials: Aws::Credentials.new(AWS_KEY, AWS_SECRET),
region: AWS_REGION || 'us-east-1'
})
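# Use BrowserStack for screenshots when credentials are configured; otherwise fall back to the PageSnap service.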
if BROWSERSTACK_USER && BROWSERSTACK_KEY
Snapshotter = Browserstack.new(BROWSERSTACK_USER, BROWSERSTACK_KEY)
else
Snapshotter = PageSnap.new(PAGESNAP_URL)
end
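# Static metadata for each monitored state (hostname, state name, abbreviation, etc.).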
MonitorList = JSON.parse(File.read('public/data/pingometer_monitors.json'))
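# Home page: current up/down status and past-week uptime for every state.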
get '/' do
# Get all states into a hash
@state_status = Hash[MonitorList.collect {|meta| [meta["state_abbreviation"], true]}]
# Mark states with current incidents as down
Incident.current.each do |incident|
@state_status[incident.state] = false
end
now = Time.now
week_ago = (now.utc.to_date - 6.days).to_time
week_ago = week_ago + week_ago.utc_offset
@state_week_uptime = state_uptimes_between(week_ago, now)
erb :index
end
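# Detail page for a single state: status, metadata, and recent snapshots for each of its monitors.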
get '/states/:state_abbreviation' do
state_abbreviation = params[:state_abbreviation].downcase
monitors = MonitorList.find_all {|monitor_info| monitor_info["state_abbreviation"].downcase == state_abbreviation}
if monitors.empty?
# The parameter may be a full state name; redirect to the abbreviation-based URL if so.
monitor = MonitorList.find {|monitor_info| monitor_info["state"].downcase == state_abbreviation}
if monitor
redirect "/states/#{monitor['state_abbreviation']}"
else
raise Sinatra::NotFound
end
end
state = monitors[0]["state"]
snapshots = Snapshot.where(state: state_abbreviation.upcase).sort(date: -1)
begin
all_monitors = Pingometer.new(PINGOMETER_USER, PINGOMETER_PASS).monitors
rescue
@error_message = "Our status monitoring system, Pingometer, appears to be having problems."
return erb :error
end
monitors_data = []
all_monitors.each do |monitor_data|
meta = monitors.find {|monitor| monitor["hostname"] == monitor_hostname(monitor_data)}
if meta
monitor_snaps = snapshots.find_all {|snapshot| snapshot.name.start_with? "#{state_abbreviation.upcase}-#{monitor_data['id']}"}
monitors_data << {
:name => monitor_data["name"],
:url => monitor_url(monitor_data),
:status => case monitor_data['last_event']['type']
when -1 then :unknown
when 0 then :down
else :up
end,
:meta => meta,
:details => monitor_data,
:snapshots => monitor_snaps,
:snapshots_up => monitor_snaps.find_all {|snapshot| snapshot.status == "UP" },
:snapshots_down => monitor_snaps.find_all {|snapshot| snapshot.status != "UP" }
}
end
end
erb :state, {:locals => {
state: state,
monitors: monitors_data
}}
end
# Efficient? NO. BUT IT WORKS.
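# JSON time series of per-state uptime. Accepts optional start_date, end_date, and bucket (hour/day/week/month) params.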
get '/api/v0/uptime' do
content_type :json
bucket_size = {
'hour' => 1.hour.seconds,
'day' => 1.day.seconds,
'week' => 1.week.seconds,
'month' => 1.month.seconds
}[params[:bucket]]
start_date = make_time(params[:start_date])
end_date = make_time(params[:end_date])
return state_uptime_series(start_date, end_date, bucket_size).to_json
end
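# Pingometer webhook: record the monitor's latest event, update the matching incident, and queue a snapshot job.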
post '/hooks/event' do
content_type :json
if !USE_WEBHOOK
logger.info "IGNORING event hook for monitor #{params[:monitor_id]}"
status 501
return { error: "Webhooks are currently ignored." }.to_json
end
logger.info "Received event hook for monitor #{params[:monitor_id]}"
if !params[:monitor_id]
status 400
return { error: "No `monitor_id` included in POST." }.to_json
end
begin
monitor = Pingometer.new(PINGOMETER_USER, PINGOMETER_PASS).monitor(params[:monitor_id])
rescue
logger.error "Failed getting info on monitor #{params[:monitor_id]} from Pingometer"
status 500
return { error: "Our status monitoring system, Pingometer, appears to be having problems." }.to_json
end
monitor_meta = MonitorList.find {|monitor_meta| monitor_meta["hostname"] == monitor_hostname(monitor)}
if monitor_meta && monitor_meta["good_after"] && monitor_meta["good_after"] > Time.parse(monitor['last_event']['utc_timestamp'])
logger.error "Bailed recording event for monitor #{params[:monitor_id]} because the monitor is known to be inaccurate."
status 400
return { error: "Bad monitor" }.to_json
end
state_abbreviation = monitor_state(monitor)['state_abbreviation']
local_event = MonitorEvent.create_from_pingometer(monitor['last_event'], params[:monitor_id], state_abbreviation)
# Update incidents
last_incident = Incident.where(monitor: params[:monitor_id]).current.first || Incident.new
last_incident.add_event(local_event)
last_incident.save
Qu.enqueue(SnapshotMonitor, params[:monitor_id])
return { message: "Event saved." }.to_json
end
# Hacky helper to get a guaranteed hostname
# (transactional tests have no hostname, so fall back to the host of the first URL they load).
# Not in Pingometer API class because there's not a real generic solution to this. (Or should it be?)
def monitor_hostname(monitor)
monitor['hostname'].empty? ? monitor['commands']['1']['get'].match(/^[^\/]+\/\/([^\/]*)/)[1] : monitor['hostname']
end
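# Look up the static metadata entry for a monitor by its hostname.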
def monitor_state(monitor)
MonitorList.find {|monitor_info| monitor_info['hostname'] == monitor_hostname(monitor)}
end
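# Reconstruct the URL a monitor checks from its type, hostname, path, and querystring; transactional tests fall back to their first command's URL.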
def monitor_url(monitor)
if monitor["hostname"] && !monitor["hostname"].empty?
protocol = monitor['type'] && !monitor['type'].empty? ? monitor["type"] : "http"
host = monitor["hostname"]
path = monitor["path"] || ""
query = monitor["querystring"] && !monitor["querystring"].empty? ? "?#{monitor["querystring"]}" : ""
"#{protocol}://#{host}#{path}#{query}"
else
monitor["commands"]["1"]["get"]
end
end
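# Persist a snapshot image: upload to S3 in production (returning the public URL), or write it under ./tmp locally (returning nil).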
def save_snapshot(name, data)
if PRODUCTION
s3 = Aws::S3::Resource.new
s3.bucket(AWS_BUCKET).object(name).put(
body: data,
acl: "public-read",
content_type: "image/png")
s3.bucket(AWS_BUCKET).object(name).public_url
else
path = "./tmp/#{name}"
FileUtils.mkdir_p(File.dirname(path))
# Write in binary mode since the data is PNG image bytes.
File.open(path, "wb") do |file|
file << data
end
nil
end
end
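# Compute percentage uptime per state between t1 and t2 from recorded incidents. States with no usable data are omitted.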
def state_uptimes_between(t1, t2)
# Get incidents over a time period, trimmed to the time period
incidents = Incident.intersecting(t1, t2).collect do |incident|
if incident.start_date.nil? || incident.start_date < t1
incident.start_date = t1
end
if incident.end_date.nil? || incident.end_date > t2
incident.end_date = t2
end
incident
end
# Group by state and calculate uptime for each.
timeframe = t2 - t1
uptimes = Hash[MonitorList.collect {|meta| [meta["state_abbreviation"], 100]}]
incidents.group_by {|incident| incident.state}.each do |state, incidents|
monitor_uptimes = incidents.group_by {|incident| incident.monitor}.collect do |monitor, incidents|
# incidents.inject(0) {|downtime, incident| downtime + (incident.end_date - incident.start_date)}
monitor_time = timeframe
downtime = 0
incidents.each do |incident|
seconds = (incident.end_date - incident.start_date)
if incident.accepted?
downtime += seconds
else
monitor_time -= seconds
end
end
# Guard against a zero-length window (all time excluded by unaccepted incidents);
# 1000 is a sentinel that gets filtered out below.
if monitor_time > 0
(monitor_time - downtime) / monitor_time
else
1000
end
end
uptime = monitor_uptimes.min
if uptime <= 1
uptimes[state] = 100 * uptime
else
uptimes.delete(state)
end
end
uptimes
end
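# Build a series of per-state uptime hashes, one entry per bucket between start_date and end_date.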
def state_uptime_series(start_date=nil, end_date=nil, bucket_size=nil)
start_date ||= Time.parse('2015-02-01T00:00:00Z')
end_date ||= Time.now
bucket_size ||= 1.day.seconds
buckets = (end_date - start_date) / bucket_size
series = []
buckets.ceil.times do |index|
date = start_date + index * bucket_size
series << {
date: date.iso8601,
states: state_uptimes_between(date, date + bucket_size)
}
end
return series
end
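# Parse a timestamp string into a Time; bare dates are treated as midnight UTC. Returns nil for missing or unparseable input.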
def make_time(timestamp)
if !timestamp.kind_of? String
return nil
end
if timestamp.match(/^\d{2,4}-\d{1,2}-\d{1,2}$/)
timestamp = "#{timestamp}T00:00:00Z"
end
begin
Time.parse(timestamp)
rescue
nil
end
end