###############################################################################################
#
# Scraper3.0 - Multi-threaded Scraping
#
# Author: Huanzhu Xu
# Forked from: angela-xu/multi-threaded-web-scraping-indeed.com
# Latest update: 09-04-2016
#
###############################################################################################
import os
import re
import time
import datetime
import queue
from collections import Counter
from threading import Thread

import requests
from bs4 import BeautifulSoup
from nltk.corpus import stopwords
import pandas as pd
import matplotlib.pyplot as plt

def make_soup(html):
    '''
    This function takes a raw HTML string as argument,
    and returns a Beautiful Soup object.
    html: string, raw HTML
    return: Beautiful Soup object
    '''
    soup = BeautifulSoup(html, 'lxml')
    if len(soup) == 0:
        soup = BeautifulSoup(html, 'html5lib')  # Fall back in case lxml does not work
    return soup
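
# Quick sanity check (illustrative):
#   make_soup('<html><body><p>hi</p></body></html>').get_text() -> 'hi'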

def get_job_info(url):
    '''
    This function takes a URL of one job Ad as argument,
    cleans up the raw HTML and returns a one-dimensional list
    that contains the set of words appearing in this job Ad.
    url: string, a URL
    return: list, a one-dimensional list that contains a set of words
    '''
    try:
        html = requests.get(url).text
    except requests.exceptions.RequestException:
        # In case of connection problems
        return
    soup = make_soup(html)
    for script in soup(['script', 'style']):
        # Remove script and style elements from the soup
        script.extract()
    text = soup.get_text()
    lines = (line.strip() for line in text.splitlines())
    # Break multi-headlines (separated by double spaces) into one chunk each
    chunks = (phrase.strip() for line in lines for phrase in line.split('  '))
    # Get rid of all blank chunks and rejoin the text with single spaces
    text = ' '.join(chunk for chunk in chunks if chunk).encode('utf-8')
    try:
        text = text.decode('unicode_escape').encode('ascii', 'ignore').decode('ascii')
    except UnicodeDecodeError:
        # Some websites are not formatted in a way that this works
        return
    text = re.sub(r'[^a-zA-Z.+3]', ' ', text)  # Keep letters plus '.', '+' and '3', so skills like c++ and d3.js survive
    text = text.lower().split()  # Convert to lower case and split into words
    stop_words = set(stopwords.words('english'))  # Filter out any stop words
    text = [w for w in text if w not in stop_words]
    text = list(set(text))  # Keep each word at most once
    return text  # One-dimensional list
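
# Illustrative example (hypothetical 'clk'-style URL, as collected by get_page_info()):
#   get_job_info('http://www.indeed.com/rc/clk?jk=...') -> ['python', 'sql', 'hadoop', ...]
# Returns None when the request fails or the page cannot be decoded.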

def get_page_info(url, page_num):
    '''
    This function takes a URL and a page number as arguments,
    combines them into a new URL for search, and returns a two-dimensional list,
    where each value of the list is a one-dimensional list that contains the set of words
    appearing in one job Ad of this page.
    url: string, a base URL before the page number
    page_num: int, a page number
    return: list, a two-dimensional list where each value is a one-dimensional list
    '''
    base_url = 'http://www.indeed.com'
    page_job_descriptions = []
    start_num = str(page_num * 10)  # Indeed shows 10 results per page
    page_url = ''.join([url, '&start=', start_num])
    print('Getting page: ' + page_url)
    html_page = requests.get(page_url).text
    page_soup = make_soup(html_page)
    job_link_area = page_soup.find(id='resultsCol')  # The center column on the page where job Ads exist
    if job_link_area is None:
        print('Cannot find job link area for: ' + page_url)
        os.makedirs('output', exist_ok=True)  # Make sure the log directory exists
        with open('output/failed_to_parse_page.txt', 'a') as text_file:
            text_file.write('\n')
            text_file.write(datetime.datetime.now().strftime('%Y-%m-%d %H:%M:%S') + '\n')
            text_file.write(page_url + '\n')
            text_file.write(html_page + '\n')
        return page_job_descriptions
    job_urls = [base_url + link.get('href') for link in job_link_area.find_all('a', href=True)]  # Get the URLs for the jobs
    job_urls = [x for x in job_urls if 'clk' in x]  # Only keep the job-related URLs
    for job_url in job_urls:
        description = get_job_info(job_url)
        if description:  # Only append when the website was accessed correctly
            page_job_descriptions.append(description)
    print('Page ' + str(page_num) + ' done with collecting job descriptions')
    return page_job_descriptions  # Two-dimensional list
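
# Illustrative example (the query URL is an assumption, in the format run_scraper() builds):
#   get_page_info('http://www.indeed.com/jobs?q=data+scientist&l=Seattle%2C+WA', 2)
# requests '...&start=20' and returns e.g. [['python', 'sql', ...], ['r', 'hadoop', ...], ...].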

def get_page_info_by_range(url, page_range):
    '''
    This function takes a URL and a range of page numbers as arguments,
    and returns a three-dimensional list where each value of the list is
    a two-dimensional list that contains the job descriptions of one page.
    url: string, a URL
    page_range: list, a list of a range of numbers
    return: list, a three-dimensional list
    '''
    results = []
    for i in page_range:
        results.append(get_page_info(url, i))
    return results  # Three-dimensional list

def work(url, page_range, result_queue):
    '''
    This function is executed by a worker thread in multi-threading mode.
    It takes a URL, a list of a range of page numbers, and a queue as arguments,
    and puts its results into the queue.
    url: string, a URL
    page_range: list, a list of a range of numbers
    result_queue: queue.Queue, a thread-safe queue
    '''
    result = get_page_info_by_range(url, page_range)
    result_queue.put(result)

def process_url(num_pages, url):
    '''
    This function is the main thread in multi-threading.
    It takes the number of total pages of job Ads and a URL as arguments,
    combines the results of worker threads and returns a four-dimensional list
    that contains the results.
    num_pages: int, number of total pages of job Ads
    url: string, a URL
    return: list, a four-dimensional list that contains the results of worker threads
    '''
    # Cap the number of worker threads at 20, and use at least one
    num_threads = max(1, min(num_pages, 20))
    print('Using {} worker threads'.format(num_threads))
    page_group = num_pages // num_threads
    q = queue.Queue()
    threads = [None] * num_threads
    for i in range(num_threads):
        # The last thread also takes any leftover pages, so none are silently skipped
        end = page_group * (i + 1) if i < num_threads - 1 else num_pages
        page_range = range(page_group * i, end)
        t = Thread(target=work, args=(url, page_range, q))
        t.start()
        threads[i] = t
        time.sleep(1)  # Stagger the start of worker threads
    for i in range(num_threads):
        threads[i].join()
        print('Thread {} joined'.format(i))
    print('All threads finished')
    return [q.get() for _ in range(num_threads)]  # Four-dimensional list
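
# Shape sketch (illustrative values): the result nests thread -> page -> job Ad -> words, e.g.
#   [[[['python', 'sql', ...], ...page 0 Ads...], ...thread 0 pages...], ...threads...]
# run_scraper() flattens the two outer levels with sum(sum(result, []), []).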

def run_scraper(city=None, state=None, job='data+scientist'):
    '''
    This function takes a city/state as arguments and looks for all job Ads
    on Indeed.com with the specified city/state. It crawls all of the job Ads and
    keeps track of how many use a preset list of typical data science skills.
    It returns the number of total job Ads successfully scraped, a dataframe
    that contains information about each data science skill with its number and
    percentage of appearing in job Ads, and a bar chart displaying the percentage
    for each skill.
    city/state: string, city/state of interest, for example, run_scraper('Seattle', 'WA').
                Use a two-letter abbreviation for the state.
                City and state must be specified together, or be omitted together.
                If city and state are omitted, the function will assume a national search.
    return: 1) the number of total job Ads successfully scraped
            2) a Pandas dataframe that contains information about each data science skill
               with its number and percentage of appearing in job Ads
            3) a bar chart for visualization
    '''
    if city is not None:
        location = city + ', ' + state  # Human-readable location for messages and the plot title
        url_list = ['http://www.indeed.com/jobs?q=', job, '&l=', '+'.join(city.split()), '%2C+', state]
    else:
        location = 'the US (national search)'
        url_list = ['http://www.indeed.com/jobs?q=', job]
    url = ''.join(url_list)  # URL for job search
    print('Using URL ' + url)
    try:
        html = requests.get(url).text
    except requests.exceptions.RequestException:
        print('The location ' + location + ' could not be found.')
        return
    soup = make_soup(html)
    num = soup.find(id='searchCount')
    if num is None:
        print('Cannot find the search count on: ' + url)
        return
    job_numbers = re.findall(r'\d+', num.get_text())  # Digit groups in e.g. 'Jobs 1 to 10 of 2,334'
    # Process commas in large number representations
    if len(job_numbers) > 3:
        total_num_jobs = (int(job_numbers[2]) * 1000) + int(job_numbers[3])
    else:
        total_num_jobs = int(job_numbers[2])
    print(str(total_num_jobs) + ' jobs found in ' + location)
    num_pages = max(1, total_num_jobs // 10)  # Indeed shows 10 job Ads per page
    # Multi-threading
    total_job_descriptions = process_url(num_pages, url)
    # Convert the four-dimensional list into a two-dimensional list
    total_job_descriptions = sum(sum(total_job_descriptions, []), [])
    total_jobs_found = len(total_job_descriptions)
    if total_jobs_found == 0:
        print('No job descriptions could be scraped.')
        return
    print('Done with collecting the job Ads!')
    print('There were ' + str(total_jobs_found) + ' jobs successfully found.')
    # Calculate the number and percentage of job Ads having a certain skill
    doc_frequency = Counter()
    for job_description in total_job_descriptions:
        doc_frequency.update(job_description)
    language_dict = Counter({'Python': doc_frequency['python'], 'R': doc_frequency['r'],
                             'Java': doc_frequency['java'], 'C++': doc_frequency['c++'],
                             'Ruby': doc_frequency['ruby'], 'Perl': doc_frequency['perl'],
                             'MATLAB': doc_frequency['matlab'], 'JavaScript': doc_frequency['javascript'],
                             'Scala': doc_frequency['scala'], 'C#': doc_frequency['c#'],
                             'PHP': doc_frequency['php'], 'HTML': doc_frequency['html'],
                             'SAS': doc_frequency['sas'], 'Julia': doc_frequency['julia']})
    tool_dict = Counter({'Excel': doc_frequency['excel'], 'Tableau': doc_frequency['tableau'],
                         'D3.js': doc_frequency['d3.js'], 'LaTeX': doc_frequency['latex'],
                         'SPSS': doc_frequency['spss'], 'D3': doc_frequency['d3'],
                         'STATA': doc_frequency['stata']})
    big_data_dict = Counter({'Hadoop': doc_frequency['hadoop'], 'MapReduce': doc_frequency['mapreduce'],
                             'Spark': doc_frequency['spark'], 'Pig': doc_frequency['pig'],
                             'Hive': doc_frequency['hive'], 'Shark': doc_frequency['shark'],
                             'Oozie': doc_frequency['oozie'], 'ZooKeeper': doc_frequency['zookeeper'],
                             'Flume': doc_frequency['flume'], 'Mahout': doc_frequency['mahout']})
    database_dict = Counter({'SQL': doc_frequency['sql'], 'NoSQL': doc_frequency['nosql'],
                             'HBase': doc_frequency['hbase'], 'Cassandra': doc_frequency['cassandra'],
                             'MongoDB': doc_frequency['mongodb']})
    total_skills = language_dict + tool_dict + big_data_dict + database_dict
    df = pd.DataFrame(list(total_skills.items()), columns=['Skill', 'NumAds'])  # Convert results into a dataframe
    df['Percentage'] = df.NumAds / total_jobs_found * 100.0  # Percentage of job Ads having a certain skill
    df.sort_values(by='Percentage', ascending=True, inplace=True)  # Sort data for plotting
    # Visualization
    plot = df.plot(x='Skill', y='Percentage', kind='barh', legend=False, color='skyblue',
                   title='Percentage of Data Scientist Job Ads with a Key Skill, ' + location)
    plot.set_xlabel('Percentage Appearing in Job Ads')
    fig = plot.get_figure()  # Convert the pandas plot object to a matplotlib object
    plt.tight_layout()
    return total_jobs_found, df, fig
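
###############################################################################################
#
# Example usage - a minimal, illustrative sketch. The city/state and the output
# path below are assumptions; omit city and state for a national search.
#
###############################################################################################
if __name__ == '__main__':
    result = run_scraper('Seattle', 'WA')
    if result is not None:  # run_scraper() returns None when the search cannot be fetched or parsed
        jobs_found, skills_df, fig = result
        print('Scraped {} job Ads'.format(jobs_found))
        print(skills_df)
        os.makedirs('output', exist_ok=True)
        fig.savefig('output/skill_percentages.png')  # Hypothetical output path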