How to run a script in multithreading or multiprocessing
This script takes about 2 seconds to finish. How can I run it in multiple threads so that it completes in something like 50 ms?
import urllib2
from threading import Thread

def btl_test(url):
    page = urllib2.urlopen(url)
    print page

url = ["http://google.com","http://example.com","http://yahoo.com","http://linkedin.com","http://orkut.com","http://quora.com","http://facebook.com","http://myspace.com","http://gmail.com","http://nltk.org","http://cyber.com"]

for i in url:
    t = Thread(target=btl_test, args=(i,))
    t.start()
And how can I sort the results?
from contextlib import closing
from multiprocessing.pool import Pool

with closing(Pool(len(url))) as pool:
    pool.map(btl_test, url)
This should be a handy snippet. As for the order, you can map to tuples and print them accordingly.
Update:
According to this blog, pool.map returns its output in the same order as the inputs. Here is code that prints a list of tuples in (url, html_content) format without changing that order:
urls = ["http://google.com","http://example.com","http://yahoo.com","http://linkedin.com","http://orkut.com","http://quora.com","http://facebook.com","http://myspace.com","http://gmail.com","http://nltk.org","http://cyber.com"]

def btl_test(url):
    import urllib2
    return url, urllib2.urlopen(url).read()

from contextlib import closing
from multiprocessing.pool import Pool

with closing(Pool(len(urls))) as pool:
    result = pool.map(btl_test, urls)
print result
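Since fetching pages is I/O-bound rather than CPU-bound, a thread-backed pool may be a lighter-weight alternative to worker processes. multiprocessing.dummy exposes the same Pool/map API on top of threads, and map still returns results in input order. A minimal sketch of that variant (the fetch helper name and the shortened urls list are just illustrative):

from contextlib import closing
from multiprocessing.dummy import Pool  # same API as multiprocessing.pool.Pool, backed by threads
import urllib2

def fetch(url):
    # return (url, html) so each result stays paired with its url
    return url, urllib2.urlopen(url).read()

urls = ["http://google.com", "http://example.com", "http://yahoo.com"]

with closing(Pool(len(urls))) as pool:
    result = pool.map(fetch, urls)  # results come back in the same order as urls

for u, html in result:
    print u, len(html)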
Try using a Queue() together with enumerate to keep track of the order.
import threading
import requests
import Queue

class UrlReader(threading.Thread):
    # worker thread: pulls (index, url) pairs from the input queue and
    # puts (index, final_url) pairs on the output queue
    def __init__(self, queue, output):
        super(UrlReader, self).__init__()
        self.setDaemon(True)
        self.queue = queue
        self.output = output

    def run(self):
        while True:
            try:
                target = self.queue.get(block=False)
                data = requests.get(target[1])
                print data.status_code
                if data.status_code == 200:
                    self.queue.task_done()
                    self.output.put((target[0], data.url), block=False)
                else:
                    # non-200 response: mark the item done and requeue it for another try
                    self.queue.task_done()
                    self.queue.put(target)
            except Queue.Empty:
                break
            except requests.exceptions.ConnectionError:
                self.queue.task_done()
                self.queue.put(target)

def load(urlrange, num_threads):
    mainqueue = Queue.Queue()
    outq = Queue.Queue()
    mythreads = []
    for url in urlrange:
        mainqueue.put(url)
    for j in xrange(num_threads):
        mythreads.append(UrlReader(mainqueue, outq))
        mythreads[-1].start()
    mainqueue.join()
    for j in xrange(num_threads):
        mythreads[j].join()
    return list(outq.__dict__['queue'])  # drain the output queue into a list
urls = ["http://google.com","http://example.com","http://yahoo.com","http://linkedin.com","http://orkut.com","http://quora.com","http://facebook.com","http://myspace.com","http://gmail.com","http://nltk.org","http://cyber.com"]
print load(enumerate(urls), 10)
>>> [(6, 'http://facebook.com'), (9, 'http://nltk.org'), (0, 'http://google.com'), (1, 'http://example.com'), (2, 'http://yahoo.com'), (3, 'http://linkedin.com'), (4, 'http://orkut.com'), (5, 'http://quora.com'), (7, 'http://myspace.com'), (8, 'http://gmail.com'), (10, 'http://cyber.com')]
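The pairs above come back in completion order rather than input order. Since each pair carries the original index attached by enumerate, sorting by that index restores the input order. A small sketch, assuming the (index, url) pairs shown above:

results = load(enumerate(urls), 10)

# sort by the index that enumerate attached to each url
for index, final_url in sorted(results, key=lambda pair: pair[0]):
    print index, final_url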
This works:
from urlparse import urlparse
from multiprocessing.pool import Pool
import re
import urllib2

def btl_test(url):
    page = urllib2.urlopen(url).read()
    if re.findall(r'<title>(.*?)</title>', page):
        page1 = re.findall(r'<title>(.*?)</title>', page)[0]
        print page1

url = ["http://google.com","http://example.com","http://yahoo.com","http://linkedin.com","http://facebook.com","http://orkut.com","http://oosing.com","http://pinterets.com","http://orkut.com","http://quora.com","http://facebook.com","http://myspace.com","http://gmail.com","http://nltk.org","http://cyber.com"]

#for i in url:
#    print btl_test(i)

nprocs = 2  # nprocs is the number of processes to run

ParsePool = Pool(nprocs)
ParsePool.map(btl_test, url)

#ParsedURLS = ParsePool.map(btl_test,url)
#print ParsedURLS
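The commented-out ParsedURLS lines only give useful output if btl_test returns something instead of printing inside the worker processes. A hedged variant of that idea (returning the first <title> or None, and the shortened urls list, are my assumptions, not the original code):

import re
import urllib2
from multiprocessing.pool import Pool

def btl_test(url):
    page = urllib2.urlopen(url).read()
    titles = re.findall(r'<title>(.*?)</title>', page)
    # return the first <title> if present, None otherwise
    return titles[0] if titles else None

if __name__ == '__main__':
    urls = ["http://google.com", "http://example.com", "http://yahoo.com"]
    pool = Pool(2)
    ParsedURLS = pool.map(btl_test, urls)  # titles, in the same order as urls
    pool.close()
    pool.join()
    print ParsedURLS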
Very helpful.