Song_downloader.py
import sys

import requests
from bs4 import BeautifulSoup


def crawler(url):
    # Fetch a search-results page, offer each matching file for download,
    # then follow the "next page" arrow recursively.
    source_code = requests.get(url)
    plain_text = source_code.text
    soup = BeautifulSoup(plain_text, "html.parser")
    for noresult in soup.find_all('div', {'class': 'touch'}):
        if noresult.string == 'Cant find any file for Given criteria':
            print('No results found')
            input("Press <enter> to continue")
    for link in soup.find_all('a', {'class': 'touch'}):
        href = link.get('href')
        title = link.string
        print(title)
        x = input("Do you want to save the file (y/n/e) ? ")
        if x == 'e':
            sys.exit()
        if x == 'y':
            print("Downloading " + title + "...")
            get_item(href, title)
            print("\n")
    for link in soup.find_all('a', {'class': 'rightarrow'}):
        href = link.get('href')
        crawler(href.replace(" ", "%20"))


def get_item(url, title):
    # Open the file's landing page and hand its direct download link to down().
    source_code = requests.get(url)
    plain_text = source_code.text
    soup = BeautifulSoup(plain_text, "html.parser")
    for item_name in soup.find_all('a', {'class': 'touch'}):
        if item_name.string == "[ Download File ]":
            down(item_name.get("href"), title)


def down(url, filename):
    # Stream the file to disk in 4 KB chunks; when the server reports a
    # Content-Length, draw a 50-character progress bar as chunks arrive.
    with open(filename + '.mp3', 'wb') as f:
        response = requests.get(url, stream=True)
        tlength = response.headers.get('content-length')
        if tlength is None:
            f.write(response.content)
        else:
            dl = 0
            tlength = int(tlength)
            for data in response.iter_content(chunk_size=4096):
                dl += len(data)
                f.write(data)
                done = int(50 * dl / tlength)
                sys.stdout.write("\r[%s%s]" % ('#' * done, '-' * (50 - done)))
                sys.stdout.flush()


x = input("Enter the movie name : ")
y = input("Enter the format : ")
url = "http://pagalworld.co/search/" + x.replace(" ", "%20") + "/" + y + "/1.html"
crawler(url)
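
# A minimal standalone check of the streaming download helper, kept as a
# comment; the URL and name below are hypothetical placeholders, not real
# endpoints from the site:
#
#   down("http://example.com/sample.mp3", "sample")
#
# This saves sample.mp3 in the working directory, printing the progress bar
# only if the server sends a Content-Length header.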