# finalngo.py
import itertools

import requests
import xlsxwriter
from bs4 import BeautifulSoup

all_cities_url = []

def find_states(url):
    """Collect the per-city listing URLs from the state index page."""
    r = requests.get(url)
    soup = BeautifulSoup(r.text, 'html.parser')
    for anchor in soup.find_all('a'):
        link = anchor.get('href', '')  # .get avoids KeyError on href-less anchors
        # Keep only links to city-level NGO listings for Tripura.
        if 'https://www.indiangoslist.com/tripura/ngos-in-' in link:
            all_cities_url.append(link)
    return all_cities_url

find_states('https://www.indiangoslist.com/tripura-ngos-list')
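# Illustration of the anchor filtering above on an inline snippet (the city
# URL here is a made-up example, not taken from the live site):
#
#     >>> from bs4 import BeautifulSoup
#     >>> html = ('<a href="https://www.indiangoslist.com/tripura/ngos-in-example">x</a>'
#     ...         '<a>no href</a>')
#     >>> [a.get('href', '') for a in BeautifulSoup(html, 'html.parser').find_all('a')
#     ...  if 'ngos-in-' in a.get('href', '')]
#     ['https://www.indiangoslist.com/tripura/ngos-in-example']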
def getCitiesUrls(url):
    """Collect all NGO detail-page URLs from one city listing page."""
    r = requests.get(url)
    all_ngo_url = []
    soup = BeautifulSoup(r.text, 'html.parser')
    for anchor in soup.find_all('a'):
        link = anchor.get('href', '')
        if 'https://www.indiangoslist.com/ngo-address/' in link:
            all_ngo_url.append(link)
    return all_ngo_url
def getNgos(url):
    """Return the first NGO detail link found on the page, or None."""
    r = requests.get(url)
    soup = BeautifulSoup(r.text, 'html.parser')
    for anchor in soup.find_all('a'):
        link = anchor.get('href', '')
        if 'https://www.indiangoslist.com/ngo-address/' in link:
            return link
    return None
links = []
# Only the first two city pages are crawled here; iterate over the full
# all_cities_url list to cover every city.
for city_url in all_cities_url[:2]:
    for ngo_url in getCitiesUrls(city_url):
        link = getNgos(ngo_url)
        if link:  # getNgos may return None when no detail link is found
            links.append(link)
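# The pairing idiom used below turns a flat [label, value, label, value, ...]
# list into a dict; with an odd-length list the trailing label gets an empty
# value from fillvalue. Doctest-style illustration with made-up values:
#
#     >>> import itertools
#     >>> res = ["City", "Agartala", "State", "Tripura", "Telephone"]
#     >>> dict(itertools.zip_longest(*[iter(res)] * 2, fillvalue=""))
#     {'City': 'Agartala', 'State': 'Tripura', 'Telephone': ''}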
# Output path is machine-specific; adjust as needed.
workbook = xlsxwriter.Workbook("/home/aadarsh/Desktop/ngo.xlsx")

HEADERS = ["NGO Name", "Unique Id of VO/NGO", "Chief Functionary", "Chairman",
           "Secretary", "Type of NGO", "Registration Number", "frca", "City",
           "State", "Telephone", "Mobile Number", "Address", "Email",
           "Website", "Key Issues"]

def getSeperateNgoDetails():
    """Scrape each NGO detail page and write one worksheet per NGO."""
    for iteration, link in enumerate(links, start=1):
        print(iteration)
        r = requests.get(link)
        soup = BeautifulSoup(r.text, 'html.parser')
        worksheet1 = workbook.add_worksheet()
        worksheet1.write('A1', 'test')  # placeholder title cell
        row = 1
        col = 0
        for exam in soup.find_all('div', class_='ngo_line'):
            # Each 'ngo_line' div holds "label value" text; split it on the
            # page's spacing and pair tokens as label/value (see idiom above).
            res = exam.text.strip().replace("\n", "").split(" ")
            d = dict(itertools.zip_longest(*[iter(res)] * 2, fillvalue=""))
            for label, value in d.items():
                if label in HEADERS:
                    worksheet1.write(row, col, label)
                    worksheet1.write(row, col + 1, value)
                    row += 1

getSeperateNgoDetails()
workbook.close()
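# For reference, worksheet.write(row, col, value) addresses cells zero-based,
# so write(1, 0, ...) above is cell A2. A minimal self-contained sketch
# (hypothetical filename, not part of this script):
#
#     import xlsxwriter
#     wb = xlsxwriter.Workbook("example.xlsx")
#     ws = wb.add_worksheet()
#     ws.write(0, 0, "label")   # cell A1
#     ws.write(0, 1, "value")   # cell B1
#     wb.close()                # close() is required to flush the file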