Update import and add rate limiting, fix csv writing for Windows #39

Merged · 2 commits · Apr 28, 2021
export_trakt.py (30 changes: 14 additions & 16 deletions)
@@ -138,16 +138,12 @@ def write_csv(options, results):
"""Write list output into a CSV file format"""
if options.verbose:
print("CSV output file: {0}".format(options.output))
# Write result CSV
with open(options.output, 'w', encoding = 'utf-8') as fp:
keys = {}
for i in results:
for k in list(i.keys()):
keys[k] = 1
mycsv = csv.DictWriter(fp, fieldnames=list(keys.keys()), quoting=csv.QUOTE_ALL)
# Write result CSV, works with windows now
with open(options.output, 'w', encoding = 'utf-8', newline='') as fp:
mycsv = csv.DictWriter(fp, fieldnames=list(results[0].keys()), quoting=csv.QUOTE_ALL)
mycsv.writeheader()
for row in results:
mycsv.writerow(row)
mycsv.writerow(row)
fp.close()

def api_auth(options):
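Note on the write_csv change: opening the file with newline='' hands all end-of-line translation to the csv module, which is the documented fix for the doubled blank rows Windows users see. A minimal sketch of the pattern outside this repo (field names and values are illustrative, not from the exporter):

```python
import csv

# Illustrative rows shaped like the exporter's movie output.
rows = [
    {"imdb": "tt0133093", "trakt": 12601, "title": "The Matrix"},
    {"imdb": "tt0234215", "trakt": 12602, "title": "The Matrix Reloaded"},
]

# newline='' stops Python translating '\n' a second time on Windows,
# which is what produced a blank line after every record.
with open("export.csv", "w", encoding="utf-8", newline="") as fp:
    writer = csv.DictWriter(fp, fieldnames=list(rows[0].keys()), quoting=csv.QUOTE_ALL)
    writer.writeheader()
    writer.writerows(rows)
```

Taking fieldnames from results[0] assumes every row carries the same keys; the deleted loop built the union of keys across all rows, which only matters for heterogeneous rows, and each export type here emits uniform ones.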
@@ -346,7 +342,7 @@ def main():
print("trakt: {}".format(_trakt))
print("Authorization header: {}".format(_headers['Authorization']))

## Get lits from Trakt user
## Get lists from Trakt user
export_data = []
if options.userlist:
export_data = api_get_userlists(options, 1)
@@ -391,24 +387,26 @@ def main():
     export_csv = []
     find_dupids = []
     for data in export_data:
-        #pp.pprint(data)
-        if options.type[:-1] != "episode" and 'imdb' in data[options.type[:-1]]['ids']:
+        #pp.pprint(data)
+        # If movie or show
+        if options.type[:-1] != "episode" and 'imdb' in data[options.type[:-1]]['ids']:
             find_dupids.append(data[options.type[:-1]]['ids']['imdb'])
             export_csv.append({ 'imdb' : data[options.type[:-1]]['ids']['imdb'],
-                                'trakt_id' : data[options.type[:-1]]['ids']['trakt'],
+                                'trakt' : data[options.type[:-1]]['ids']['trakt'],
                                 options.time : data[options.time],
                                 'title' : data[options.type[:-1]]['title']})
-        elif 'tmdb' in data[options.type[:-1]]['ids']:
+        # If episode
+        elif 'tmdb' in data[options.type[:-1]]['ids']:
             find_dupids.append(data[options.type[:-1]]['ids']['tmdb'])
             if not data['episode']['title']: data['episode']['title'] = "no episode title"
             export_csv.append({ 'tmdb' : data[options.type[:-1]]['ids']['tmdb'],
-                                'trakt_id' : data[options.type[:-1]]['ids']['trakt'],
+                                'trakt' : data[options.type[:-1]]['ids']['trakt'],
                                 options.time : data[options.time],
                                 'season' : data[options.type[:-1]]['season'],
                                 'episode' : data[options.type[:-1]]['number'],
                                 'episode_title' : data['episode']['title'],
                                 'show_title' : data['show']['title']})
-    #print export_csv
+    # print(export_csv)
     ## Write export data into CSV file
     write_csv(options, export_csv)
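Note: renaming the trakt_id column to trakt means the exported header now names an ID type the importer understands, so a file written by export_trakt.py can be fed straight back to import_trakt.py with --format trakt. Assuming a movies export keyed on watched_at, the CSV would start like this (values illustrative):

```
"imdb","trakt","watched_at","title"
"tt0133093","12601","2021-04-01T20:00:00.000Z","The Matrix"
```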

@@ -445,7 +443,7 @@ def main():
             sent=cleanup_results['sentids'], type=options.type,
             deleted=cleanup_results['deleted'], not_found=cleanup_results['not_found']))

-    ## Found duplicate and remove duplicate
+    ## Find duplicate and remove duplicate
     dup_ids = [item for item, count in list(collections.Counter(find_dupids).items()) if count > 1]
     print("Found {dups} duplicate out of {total} {entry}".format(
         entry=options.type, dups=len(dup_ids), total=len(find_dupids)))
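Note: the duplicate report on the unchanged lines above is a plain collections.Counter pass; a self-contained sketch of the same idea, with made-up IDs:

```python
import collections

find_dupids = ["tt0133093", "tt0111161", "tt0133093"]

# Counter tallies each ID; anything seen more than once is a duplicate
# history entry that the cleanup step can then remove.
dup_ids = [item for item, count in collections.Counter(find_dupids).items() if count > 1]
print(dup_ids)  # ['tt0133093']
```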
import_trakt.py (44 changes: 26 additions & 18 deletions)
@@ -22,8 +22,10 @@
     import requests
     requests.packages.urllib3.disable_warnings()
     import csv
+    #from ratelimit import limits
+    import time
 except:
-    sys.exit("Please use your favorite mehtod to install the following module requests and simplejson to use this script")
+    sys.exit("Please use your favorite method to install the following module requests and simplejson to use this script")

 import argparse
 import configparser
@@ -136,7 +138,7 @@ def read_config(options):

 def read_csv(options):
     """Read CSV of Movies or TVShows IDs and return a dict"""
-    reader = csv.reader(options.input, delimiter=',')
+    reader = csv.DictReader(options.input, delimiter=',')
     return list(reader)

 def api_auth(options):
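Note: the csv.reader to csv.DictReader switch is what the rest of this PR leans on: rows come back as dicts keyed by the header row, so the import loop can address myid[options.format] and myid["watched_at"] by name instead of by position. A small sketch with inline data (the two-column input is illustrative):

```python
import csv
import io

# Illustrative input shaped like the exporter's output.
sample = io.StringIO('"imdb","watched_at"\n"tt0133093","2021-04-01T20:00:00.000Z"\n')

# DictReader consumes the first row as the header and yields one dict per row.
rows = list(csv.DictReader(sample, delimiter=','))
print(rows[0]["imdb"])        # tt0133093
print(rows[0]["watched_at"])  # 2021-04-01T20:00:00.000Z
```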
@@ -202,23 +204,31 @@ def api_get_list(options, page):

     return response_arr

+# @limits(calls=1, period=1)
 def api_add_to_list(options, import_data):
     """API call for Sync / Add items to list"""

+    # Rate limit for API
+    time.sleep(1)
     url = _trakt['baseurl'] + '/sync/{list}'.format(list=options.list)
     #values = '{ "movies": [ { "ids": { "imdb": "tt0000111" } }, { "ids": { , "imdb": "tt1502712" } } ] }'
     #values = '{ "movies": [ { "watched_at": "2014-01-01T00:00:00.000Z", "ids": { "imdb": "tt0000111" } }, { "watched_at": "2013-01-01T00:00:00.000Z", "ids": { "imdb": "tt1502712" } } ] }'
     if options.type == 'episodes':
-        values = { 'shows' : import_data }
+        values = { 'episodes' : import_data }
     else:
         values = { options.type : import_data }

     json_data = json.dumps(values)
     if options.verbose:
         print("Sending to URL: {0}".format(url))
         pp.pprint(json_data)

+    if _proxy['proxy']:
+        #print(url)
+        r = requests.post(url, data=json_data, headers=_headers, proxies=_proxyDict, timeout=(10, 60))
+    else:
+        r = requests.post(url, data=json_data, headers=_headers, timeout=(5, 60))

     if r.status_code != 201:
         print("Error Adding items to {list}: {status} [{text}]".format(
             list=options.list, status=r.status_code, text=r.text))
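Note: the rate limiting here is a fixed time.sleep(1) before each POST, which is dependency-free and keeps the script under Trakt's sync limits at a cost of one second per batch of 10 IDs. The commented-out import points at the third-party ratelimit package as an alternative; if that route were ever taken, the decorator form would look roughly like this (assumes pip install ratelimit):

```python
from ratelimit import limits, sleep_and_retry

@sleep_and_retry            # sleep until the window frees up instead of raising
@limits(calls=1, period=1)  # allow at most one call per one-second window
def api_add_to_list(options, import_data):
    ...  # body unchanged from the diff above
```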
@@ -371,26 +381,24 @@ def main():
     results = {'sentids' : 0, 'added' : 0, 'existing' : 0, 'not_found' : 0}
     if read_ids:
         print("Found {0} items to import".format(len(read_ids)))
+
         for myid in read_ids:
-            if myid:
-                # if not "imdb" it must be a integer
+            # If id (row) exists and is not blank (has a format)
+            if myid and myid[options.format]:
                 #pp.pprint(myid)
-                if not options.format == "imdb" and not myid[0].startswith('tt'):
-                    myid[0] = int(myid[0])
+                # If format is not "imdb" it must be cast to an integer
+                if not options.format == "imdb" and not myid[options.format].startswith('tt'):
+                    myid[options.format] = int(myid[options.format])
                 if (options.type == "movies" or options.type == "shows") and options.seen:
-                    data.append({'ids':{options.format : myid[0]}, "watched_at": options.seen})
+                    data.append({'ids':{options.format : myid[options.format]}, "watched_at": options.seen})
                 elif (options.type == "movies" or options.type == "shows") and options.watched_at:
-                    data.append({'ids':{options.format : myid[0]}, "watched_at": myid[1]})
-                elif options.type == "episodes" and options.seen and myid[1] and myid[2]:
-                    data.append({'ids':{options.format : myid[0]},
-                        "seasons": [ { "number": int(myid[1]), "episodes" :
-                            [ { "number": int(myid[2]), "watched_at": options.seen} ] } ] })
-                elif options.type == "episodes" and options.watched_at and myid[1] and myid[2] and myid[3]:
-                    data.append({'ids':{options.format : myid[0]},
-                        "seasons": [ { "number": int(myid[1]), "episodes" :
-                            [ { "number": int(myid[2]), "watched_at": myid[3] } ] } ] })
+                    data.append({'ids':{options.format : myid[options.format]}, "watched_at": myid["watched_at"]})
+                elif options.type == "episodes" and options.seen:
+                    data.append({'ids':{options.format : myid[options.format]},"watched_at": options.seen})
+                elif options.type == "episodes" and options.watched_at:
+                    data.append({'ids':{options.format : myid[options.format]},"watched_at": myid["watched_at"]})
                 else:
-                    data.append({'ids':{options.format : myid[0]}})
+                    data.append({'ids':{options.format : myid[options.format]}})
             # Import batch of 10 IDs
             if len(data) >= 10:
                 #pp.pprint(json.dumps(data))
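Note: with the episode branches flattened, every imported row now becomes a plain {'ids': ..., 'watched_at': ...} entry, and api_add_to_list wraps the batch under an 'episodes' key (the old 'shows' key with nested season/episode numbers did not match these flat entries). Assuming --type episodes --format trakt, a batch posted to the sync endpoint would serialize roughly as below (IDs and timestamps illustrative):

```python
import json

# Illustrative batch of two flat episode entries keyed by trakt episode IDs.
payload = {
    "episodes": [
        {"ids": {"trakt": 73640}, "watched_at": "2021-04-01T20:00:00.000Z"},
        {"ids": {"trakt": 73641}, "watched_at": "2021-04-02T20:00:00.000Z"},
    ]
}
print(json.dumps(payload, indent=2))
```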