#!/usr/bin/env python3
# encoding: utf-8
#
# Very simple Python program to publish the entries of an RSS feed in
# several channels of bufferapp. It uses three configuration files.
#
# - The first one includes the RSS feed of each blog [~/.rssBlogs]
# [Blog3]
# rssFeed:http://fernand0.tumblr.com/rss
#
# There can be several blogs (and more parameters, if needed for other
# things); the program will ask which one we want to publish.
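#
# The program expects the sections to be named Blog1, Blog2, ... and it also
# reads the twitterAC and pageFB keys (and, optionally, a linksToAvoid key)
# of the selected section, so a complete section presumably looks like this
# (the account names here are made up):
# [Blog1]
# rssFeed:http://fernand0.tumblr.com/rss
# twitterAC:myTwitterAccount
# pageFB:myFacebookPage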
#
# - The second one includes the secret data of the buffer app [~/.rssBuffer]
# [appKeys]
# client_id:XXXXXXXXXXXXXXXXXXXXXXXX
# client_secret:XXXXXXXXXXXXXXXXXXXXXXXXXXXxXXXX
# redirect_uri:XXXXXXXXXXXXXXXXXXXXXXXXX
# access_token:XXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXXX
#
# These data can be obtained by registering an app at the bufferapp site.
# Follow the instructions at:
# https://bufferapp.com/developers/api
#
# - The third one contains the last published URL
# [~/.rssBuffer_<identifier>.last]
# It contains just a URL, the last one published; there is one such file
# per blog, with an identifier derived from the feed URL in its name.
import os
import configparser
import feedparser
import logging
import re
import time
import sys
import urllib.parse
from bs4 import BeautifulSoup
# The buffpy version installed by "sudo pip install buffpy" does not work.
# Better use:
#   git clone https://github.com/vtemian/buffpy.git
#   cd buffpy
#   sudo python setup.py install
from buffpy.api import API
from buffpy.managers.profiles import Profiles

# Prefix and suffix of the per-blog state files
# (~/.rssBuffer_<identifier>.last and ~/.rssBuffer_<identifier>.fail)
PREFIX = "rssBuffer_"
POSFIX = "last"


def selectBlog(sel='a'):
    # List the configured blogs with the date of their most recent post and
    # select one: the most recently updated blog when sel == 'a' (automatic)
    # or the one the user chooses when sel == 'm' (manual).
    config = configparser.ConfigParser()
    config.read([os.path.expanduser('~/.rssBlogs')])
    print("Configured blogs:")
    feed = []
    # We are caching the feeds in order to use them later
    i = 1
    for section in config.sections():
        rssFeed = config.get(section, "rssFeed")
        feed.append(feedparser.parse(rssFeed))
        lastPost = feed[-1].entries[0]
        print('%s) %s %s (%s)' % (str(i), section, rssFeed,
                                  time.strftime('%Y-%m-%d %H:%M:%SZ',
                                                lastPost['published_parsed'])))
        if (i == 1) or (recentDate < lastPost['published_parsed']):
            recentDate = lastPost['published_parsed']
            recentFeed = feed[-1]
            recentIndex = i
        i = i + 1
    if sel == 'm':
        if i > 1:
            recentIndex = int(input('Select one: '))
            recentFeed = feed[recentIndex - 1]
        else:
            recentIndex = 1
    if feed:
        recentFeedBase = recentFeed.feed['title_detail']['base']
        # Build an identifier such as "fernand0_tumblr" out of the feed URL
        ini = recentFeedBase.find('/') + 2
        fin = recentFeedBase[ini:].find('.')
        identifier = recentFeedBase[ini:ini + fin] + \
            "_" + recentFeedBase[ini + fin + 1:ini + fin + 7]
        print("Selected", recentFeedBase)
        logging.info("Selected " + recentFeedBase)
    else:
        sys.exit()
    selectedBlog = {}
    if config.has_option("Blog" + str(recentIndex), "linksToAvoid"):
        selectedBlog["linksToAvoid"] = config.get("Blog" + str(recentIndex),
                                                  "linksToAvoid")
    else:
        selectedBlog["linksToAvoid"] = ""
    selectedBlog["twitterAC"] = config.get("Blog" + str(recentIndex),
                                           "twitterAC")
    selectedBlog["pageFB"] = config.get("Blog" + str(recentIndex), "pageFB")
    selectedBlog["identifier"] = identifier
    print("You have chosen")
    print(recentFeed.feed['title_detail']['base'])
    return (recentFeed, selectedBlog)


def lookForLinkPosition(linkLast, recentFeed):
    # Find the position of the last published link in the feed; entries
    # 0 .. i-1 are the ones that have not been published yet.
    if not recentFeed.entries:
        logging.info("Empty feed")
        sys.exit()
    for i in range(len(recentFeed.entries)):
        if recentFeed.entries[i].link == linkLast:
            break
    if (i == 0) and (recentFeed.entries[i].link == linkLast):
        logging.info("No new items")
        sys.exit()
    elif i == (len(recentFeed.entries) - 1):
        logging.info("All are new")
        logging.info("Please, check manually")
        sys.exit()
    logging.debug("i: " + str(i))
    return i


def connectBuffer():
    config = configparser.ConfigParser()
    config.read([os.path.expanduser('~/.rssBuffer')])
    clientId = config.get("appKeys", "client_id")
    clientSecret = config.get("appKeys", "client_secret")
    redirectUrl = config.get("appKeys", "redirect_uri")
    accessToken = config.get("appKeys", "access_token")
    # Instantiate the api object
    api = API(client_id=clientId,
              client_secret=clientSecret,
              access_token=accessToken)
    logging.debug(api.info)
    return api


def checkPendingPosts(api):
    # We can queue as many items as the service with the most pending
    # updates allows; the limit is ten.
    # Get all the pending updates of each social network profile.
    serviceList = ['twitter', 'facebook', 'linkedin']
    profileList = {}
    lenMax = 0
    logging.info("Checking services...")
    for service in serviceList:
        profileList[service] = Profiles(api=api).filter(service=service)[0]
        if len(profileList[service].updates.pending) > lenMax:
            lenMax = len(profileList[service].updates.pending)
        logging.info("%s ok" % service)
    logging.info("There are %d in some buffer, we can put %d more" %
                 (lenMax, 10 - lenMax))
    return (lenMax, profileList)
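

# getBlogData() below calls extractLinks() and extractImage(), which are not
# defined in this listing; these are minimal sketches of what they presumably
# do (collect the links of the summary, skipping the ones to avoid, and take
# the first image). The real helpers may differ.
def extractLinks(soup, linksToAvoid=""):
    # Return the targets of the anchors of the summary, one per line,
    # skipping the ones that match the linksToAvoid pattern (if given).
    links = []
    for anchor in soup.findAll("a"):
        href = anchor.get("href", "")
        if linksToAvoid and re.search(linksToAvoid, href):
            continue
        links.append(href)
    return "\n".join(links)


def extractImage(soup):
    # Return the URL of the first image of the summary ('' if there is none).
    img = soup.find("img")
    return img.get("src", "") if img else ""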


def getBlogData(recentFeed, selectedBlog, i=0):
    # Show (and return) the data of the selected entry of the feed.
    i = 0  # It will publish the last added item
    soup = BeautifulSoup(recentFeed.entries[i].title, 'html.parser')
    theTitle = soup.get_text()
    theLink = recentFeed.entries[i].link
    soup = BeautifulSoup(recentFeed.entries[i].summary, 'html.parser')
    theSummary = soup.get_text()
    theSummaryLinks = extractLinks(soup, selectedBlog["linksToAvoid"])
    theImage = extractImage(soup)
    theTwitter = selectedBlog["twitterAC"]
    theFbPage = selectedBlog["pageFB"]
    print("============================================================\n")
    print("Results: \n")
    print("============================================================\n")
    print(theTitle)
    print(theLink)
    print(theSummary)
    print(theSummaryLinks)
    print(theImage)
    print(theTwitter)
    print(theFbPage)
    print("============================================================\n")
    return (theTitle, theLink, theSummary, theSummaryLinks,
            theImage, theTwitter, theFbPage)


def publishPosts(selectedBlog, profileList, recentFeed, lenMax, i):
    # Publish the pending entries, from oldest to newest, in the free
    # slots of the buffers (at most 10 - lenMax of them).
    serviceList = ['twitter', 'facebook', 'linkedin']
    for j in range(10 - lenMax, 0, -1):
        if i == 0:
            break
        i = i - 1
        post = obtainBlogData(recentFeed, lenMax, i)
        print("post", post)
        for service in serviceList:
            line = service
            profile = profileList[service]
            try:
                profile.updates.new(post)
                line = line + ' ok'
                time.sleep(3)
            except Exception:
                line = line + ' fail'
                failFile = open(os.path.expanduser("~/." +
                                                   PREFIX +
                                                   selectedBlog['identifier'] +
                                                   ".fail"), "w")
                failFile.write(post)
                failFile.close()
            logging.info(" %s service" % line)
    # Record the last published link for the next run
    urlFile = open(os.path.expanduser("~/." +
                                      PREFIX + selectedBlog['identifier'] +
                                      "." + POSFIX), "w")
    urlFile.write(recentFeed.entries[i].link)
    urlFile.close()


def obtainBlogData(recentFeed, lenMax, i):
    # Build the text of the post (title + link) for entry i of the feed.
    feedBase = recentFeed.feed['title_detail']['base']
    if feedBase.find('tumblr') > 0:
        # Link in the content
        soup = BeautifulSoup(recentFeed.entries[i].summary, 'html.parser')
        pageLink = soup.findAll("a")
        if pageLink:
            theLink = pageLink[0]["href"]
            theTitle = pageLink[0].get_text()
            if len(re.findall(r'\w+', theTitle)) == 1:
                logging.debug("Just one word, let's try the title")
                theTitle = recentFeed.entries[i].title
            if (theLink[:26] == "https://www.instagram.com/") and \
               (theTitle[:17] == "A video posted by"):
                # Exception for Instagram videos
                theTitle = recentFeed.entries[i].title
            if (theLink[:22] == "https://instagram.com/") and \
               (theTitle.find("(en") > 0):
                # Remove the "(en ..." location suffix Instagram appends
                theTitle = theTitle[:theTitle.find("(en") - 1]
        else:
            # Some entries do not have a proper link and the RSS contains
            # the video, image, ... in the description.
            # In this case we use the title and the link of the entry.
            theLink = recentFeed.entries[i].link
            theTitle = recentFeed.entries[i].title
    elif feedBase.find('wordpress') > 0:
        theTitle = BeautifulSoup(recentFeed.entries[i].title,
                                 'html.parser').get_text()
        theLink = recentFeed.entries[i].link
    else:
        logging.info("I don't know what to do!")
        theTitle = recentFeed.entries[i].title
        theLink = recentFeed.entries[i].link
    # pageImage = soup.findAll("img")
    # Sometimes there are newlines and unnecessary spaces
    theTitle = re.sub(r'\n+', ' ', theTitle)
    # There are problems with characters like &, so we quote the title
    theTitle = urllib.parse.quote(theTitle.encode('utf-8'))
    theLink = urllib.parse.quote(theLink, safe=":/")
    post = theTitle + " " + theLink
    logging.info("Publishing... %s" % post)
    return post
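

# Note that, because of the quoting above, a tumblr entry titled "My title"
# pointing to http://example.com/x (a made-up URL) would be published as
# "My%20title http://example.com/x".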


def main():
    logging.basicConfig(filename='/home/ftricas/usr/var/' + PREFIX + '.log',
                        level=logging.INFO,
                        format='%(asctime)s %(message)s')
    recentFeed, selectedBlog = selectBlog('m')
    urlFile = open(os.path.expanduser("~/." +
                                      PREFIX + selectedBlog['identifier'] +
                                      "." + POSFIX), "r")
    linkLast = urlFile.read().rstrip()  # Last published link
    urlFile.close()
    i = lookForLinkPosition(linkLast, recentFeed)
    api = connectBuffer()
    lenMax, profileList = checkPendingPosts(api)
    logging.info("We have %d items to post" % i)
    print("We have %d items to post" % i)
    publishPosts(selectedBlog, profileList, recentFeed, lenMax, i)


if __name__ == '__main__':
    main()