multiprocess_queue.py (forked from REMitchell/python-scraping)

from urllib.request import urlopen
from bs4 import BeautifulSoup
import re
from multiprocessing import Process, Queue
import os
import time
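
# A multiprocess crawler: the delegator process feeds article paths into
# taskQueue and tracks visited pages; scraper processes pull paths off
# taskQueue and push each page's discovered links onto foundUrlsQueue
# for the delegator to deduplicate and re-enqueue.
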
def task_delegator(taskQueue, foundUrlsQueue):
    # Initialize with a task for each process
    visited = ['/wiki/Kevin_Bacon', '/wiki/Monty_Python']
    taskQueue.put('/wiki/Kevin_Bacon')
    taskQueue.put('/wiki/Monty_Python')

    while True:
        # Check to see if there are new links in the foundUrlsQueue for processing
        if not foundUrlsQueue.empty():
            links = [link for link in foundUrlsQueue.get() if link not in visited]
            for link in links:
                # Add new link to the taskQueue
                taskQueue.put(link)
                # Add new link to the visited list
                visited.append(link)
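

# The href regex admits internal article links only: paths that start
# with /wiki/ and contain no colon after the prefix (a colon marks a
# namespaced page such as File: or Talk:).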
def get_links(bsObj):
    links = bsObj.find('div', {'id': 'bodyContent'}).find_all(
        'a', href=re.compile('^(/wiki/)((?!:).)*$'))
    return [link.attrs['href'] for link in links]
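

# Worker loop: wait for a path on the task queue, fetch and parse the
# article, print progress, and hand discovered links back to the
# delegator.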
def scrape_article(taskQueue, foundUrlsQueue):
    while True:
        while taskQueue.empty():
            # Sleep 100 ms while waiting for the task queue
            # This should be rare
            time.sleep(.1)
        path = taskQueue.get()
        html = urlopen('http://en.wikipedia.org{}'.format(path))
        # Rate-limit: pause 5 seconds between requests
        time.sleep(5)
        bsObj = BeautifulSoup(html, 'html.parser')
        title = bsObj.find('h1').get_text()
        print('Scraping {} in process {}'.format(title, os.getpid()))
        links = get_links(bsObj)
        # Send these to the delegator for processing
        foundUrlsQueue.put(links)
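

# Wire up one delegator process and two scraper processes sharing the
# same two queues.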
if __name__ == '__main__':
    # Guard process creation so child processes that re-import this
    # module (on spawn-based platforms such as Windows) do not start
    # children of their own.
    taskQueue = Queue()
    foundUrlsQueue = Queue()
    processes = []
    processes.append(Process(target=task_delegator, args=(taskQueue, foundUrlsQueue)))
    processes.append(Process(target=scrape_article, args=(taskQueue, foundUrlsQueue)))
    processes.append(Process(target=scrape_article, args=(taskQueue, foundUrlsQueue)))

    for p in processes:
        p.start()
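
    # Note: the delegator and scrapers loop forever; there is no join or
    # shutdown logic, so stop the crawler with Ctrl-C.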