Removed Optimistic Retry (#27)
* Removed optimistic retry

* Managing RLE

* Clean up tests
jarredSafegraph authored Feb 7, 2024
1 parent faca8db commit 684a6a3
Showing 2 changed files with 24 additions and 18 deletions.
28 changes: 14 additions & 14 deletions placekey/api.py
@@ -1,3 +1,4 @@
import hashlib
import json
import logging
import itertools
@@ -131,11 +132,7 @@ def lookup_placekey(self,
        if strict_name_match:
            payload['options'] = {"strict_name_match": True}

-        # Make request, and retry if there is a server-side rate limit error
-        while True:
-            result = self.make_request(payload)
-            if result.status_code != 429:
-                break
+        result = self.make_request(payload)

        return json.loads(result.text)

@@ -203,7 +200,7 @@ def lookup_placekeys(self,
                )
            except RateLimitException:
                self.logger.error(
-                    'Fatal error encountered. Returning processed items.')
+                    'Fatal error encountered. Returning processed items at size %s of %s', i, len(places))
                break

            # Catch case where all queries in batch having an error,
@@ -212,7 +209,7 @@
                self.logger.info(
                    'All queries in batch (%s, %s) had errors', i, max_batch_idx)

-                res = [{'query_id': query_id, 'error': res['error']}
+                res = [{'query_id': query_id, 'error': res['error']}
                       for query_id in batch_query_ids]

            # Catch other server-side errors
@@ -252,7 +249,7 @@ def lookup_batch(self, places,
        if len(places) > self.MAX_BATCH_SIZE:
            raise ValueError(
                '{} places submitted. The number of places in a batch can be at most {}'
-                .format(len(places), self.MAX_BATCH_SIZE)
+                .format(len(places), self.MAX_BATCH_SIZE)
            )

        batch_payload = {
@@ -263,11 +260,7 @@
        if strict_name_match:
            batch_payload['options'] = {"strict_name_match": True}

-        # Make request, and retry if there is a server-side rate limit error
-        while True:
-            result = self.make_bulk_request(batch_payload)
-            if result.status_code != 429:
-                break
+        result = self.make_bulk_request(batch_payload)

        return json.loads(result.text)
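
For orientation, here is a hypothetical caller-level sketch of how these two entry points behave after this change; the constructor argument and place fields are placeholders, and the keyword-argument style for lookup_placekey is an assumption based on its signature above. Both methods now issue a single request and delegate 429 handling to the rate-limited request function in the next hunk.

# Hypothetical usage sketch; the API key and place fields are placeholders.
from placekey.api import PlacekeyAPI

pk_api = PlacekeyAPI("YOUR-API-KEY")

# Single lookup: one request, no caller-side retry loop on 429.
single = pk_api.lookup_placekey(latitude=37.7371, longitude=-122.44283)

# Bulk lookup: raises ValueError when len(places) > MAX_BATCH_SIZE,
# otherwise sends one bulk request.
places = [{"street_address": "598 Portola Dr", "city": "San Francisco",
           "region": "CA", "postal_code": "94131", "iso_country_code": "US"}]
batch = pk_api.lookup_batch(places)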

@@ -283,12 +276,19 @@ def _get_request_function(self, url, calls, period, max_tries):
        :param period: length of rate limiting time period in seconds
        :param max_tries: the maximum number of retries before giving up
        """
+
        @on_exception(fibo, RateLimitException, max_tries=max_tries)
        @limits(calls=calls, period=period)
        def make_request(data):
-            return requests.post(
+            response = requests.post(
                url, headers=self.headers,
                data=json.dumps(data).encode('utf-8')
            )
+
+            if response.status_code == 429:
+                raise RateLimitException("Rate limit exceeded", 0)
+
+            # Assumption: A code other than 429 is handled by calling function
+            return response

        return make_request
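
Taken together, these hunks move rate-limit handling out of the callers and into the decorated request function: a 429 response now raises RateLimitException, the on_exception(fibo, ...) decorator retries it with Fibonacci backoff up to max_tries, and @limits throttles outgoing calls. Below is a minimal, self-contained sketch of that flow, assuming the decorators come from the backoff and ratelimit packages; the URL, headers, and limit values are placeholders.

# Minimal sketch of the new retry flow (placeholder endpoint and limits).
import json

import requests
from backoff import fibo, on_exception
from ratelimit import RateLimitException, limits

URL = "https://example.invalid/placekey"        # placeholder endpoint
HEADERS = {"Content-Type": "application/json"}  # placeholder headers


@on_exception(fibo, RateLimitException, max_tries=20)  # retry with Fibonacci backoff
@limits(calls=100, period=60)                          # client-side rate limit
def make_request(data):
    response = requests.post(
        URL, headers=HEADERS,
        data=json.dumps(data).encode('utf-8')
    )
    # A 429 surfaces as RateLimitException, so the backoff decorator owns the
    # retries instead of a caller-side `while True` loop.
    if response.status_code == 429:
        raise RateLimitException("Rate limit exceeded", 0)
    return response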
14 changes: 10 additions & 4 deletions placekey/tests/test_api.py
@@ -3,11 +3,12 @@
To exclude slow tests run `pytest -m"not slow" placekey/tests/test_api.py`.
"""

+import os
+import random
import unittest

import pytest
-import random

from placekey.api import PlacekeyAPI


@@ -92,11 +93,13 @@ def test_lookup_placekeys(self):
                "longitude": -122.44283
            }
        ]
+        found_placekeys = self.pk_api.lookup_placekeys(places,verbose=True)
+        print(found_placekeys)
        self.assertListEqual(
            self.pk_api.lookup_placekeys(places),
            [
-                {'query_id': 'place_0', 'placekey': '226@5vg-7gq-5mk'},
-                {'query_id': 'thisqueryidaloneiscustom', 'placekey': '227-222@5vg-82n-pgk'},
+                {'query_id': 'place_0', 'placekey': '22g@5vg-7gq-5mk'},
+                {'query_id': 'thisqueryidaloneiscustom', 'placekey': '227-223@5vg-82n-pgk'},
                {'query_id': 'place_2', 'placekey': '@5vg-82n-kzz'}
            ]
        )
@@ -116,3 +119,6 @@ def test_lookup_placekeys_slow(self):
        results = self.pk_api.lookup_placekeys(lat_long_samples)
        self.assertEqual(len(results), num_samples)
        self.assertTrue(all(['placekey' in r for r in results]))



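As a closing note, here is a sketch (not part of this commit) of how the new 429 handling could be exercised without the live API: make_request_like below is a hypothetical stand-in that mirrors the body of the decorated request function, with requests.post replaced by a mock.

# Hypothetical test sketch; make_request_like mirrors the new request body.
import unittest
from unittest.mock import MagicMock

from ratelimit import RateLimitException


def make_request_like(post, payload):
    """Stand-in for the decorated request function, minus the decorators."""
    response = post(payload)
    if response.status_code == 429:
        raise RateLimitException("Rate limit exceeded", 0)
    return response


class TestRateLimitHandling(unittest.TestCase):
    def test_429_raises_rate_limit_exception(self):
        post = MagicMock(return_value=MagicMock(status_code=429))
        with self.assertRaises(RateLimitException):
            make_request_like(post, {"latitude": 37.7371, "longitude": -122.44283})

    def test_non_429_response_is_returned(self):
        ok = MagicMock(status_code=200)
        post = MagicMock(return_value=ok)
        self.assertIs(make_request_like(post, {}), ok)


if __name__ == '__main__':
    unittest.main()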