Python: How to call a method in a separate process
I want to launch the ActorCore method in a separate process and then process messages that arrive at that ActorCore. For some reason, this code is not working:
import queue
from multiprocessing import Process

class NotMessage(Exception):
    def __str__(self):
        return 'NotMessage exception'

class Message(object):
    def Do(self, Actor):
        # Do some stuff to the actor
        pass

    def __str__(self):
        return 'Generic message'

class StopMessage(Message):
    def Do(self, Actor):
        Actor.__stopped = True

    def __str__(self):
        return 'Stop message'

class Actor(object):
    __DebugName = ''
    __MsgQ = None
    __stopped = False

    def __init__(self, Name):
        self.__DebugName = Name
        self.__MsgQ = queue.Queue()

    def LaunchActor(self):
        p = Process(target=self.ActorCore)
        p.start()
        return self.__MsgQ

    def ActorCore(self):
        while not self.__stopped:
            Msg = self.__MsgQ.get(block=True)
            try:
                Msg.Do(self)
                print(Msg)
            except NotMessage as e:
                print(str(e), ' occurred in ', self.__DebugName)

def main():
    joe = Actor('Joe')
    msg = Message()
    stop = StopMessage()
    qToJoe = joe.LaunchActor()
    qToJoe.put(msg)
    qToJoe.put(msg)
    qToJoe.put(stop)

if __name__ == '__main__':
    main()
I get a strange error when running it:
Traceback (most recent call last):
File "C:/Users/plkruczp/PycharmProjects/ActorFramework/Actor/Actor.py", line 64, in <module>
main()
File "C:/Users/plkruczp/PycharmProjects/ActorFramework/Actor/Actor.py", line 58, in main
qToJoe = joe.LaunchActor()
File "C:/Users/plkruczp/PycharmProjects/ActorFramework/Actor/Actor.py", line 40, in LaunchActor
p.start()
File "C:\Program Files\Python35\lib\multiprocessing\process.py", line 105, in start
self._popen = self._Popen(self)
File "C:\Program Files\Python35\lib\multiprocessing\context.py", line 212, in _Popen
return _default_context.get_context().Process._Popen(process_obj)
File "C:\Program Files\Python35\lib\multiprocessing\context.py", line 313, in _Popen
return Popen(process_obj)
File "C:\Program Files\Python35\lib\multiprocessing\popen_spawn_win32.py", line 66, in __init__
reduction.dump(process_obj, to_child)
File "C:\Program Files\Python35\lib\multiprocessing\reduction.py", line 59, in dump
ForkingPickler(file, protocol).dump(obj)
TypeError: can't pickle _thread.lock objects
Please help! I have tried everything :(
Just use Queue instead of queue:
Remove import queue and add Queue to the from multiprocessing import, like:

from multiprocessing import Process, Queue

then change self.__MsgQ = queue.Queue() to self.__MsgQ = Queue()
That is all you have to do; the rest stays the same as in your case.
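Applied to the code above, a minimal self-contained sketch of the fixed actor might look like this. Note that the stop handling is reduced to a None sentinel here purely for illustration; the original StopMessage class and exception handling are unchanged by the fix:

from multiprocessing import Process, Queue

class Actor(object):
    def __init__(self, Name):
        self.__DebugName = Name
        self.__MsgQ = Queue()  # process-safe queue instead of queue.Queue()

    def LaunchActor(self):
        # The child process inherits the multiprocessing.Queue when it is spawned,
        # so pickling the bound method no longer fails
        p = Process(target=self.ActorCore)
        p.start()
        return self.__MsgQ

    def ActorCore(self):
        while True:
            Msg = self.__MsgQ.get(block=True)  # blocks until a message arrives
            if Msg is None:                    # simplified stop signal for this sketch
                break
            print(Msg, 'received by', self.__DebugName)

if __name__ == '__main__':
    qToJoe = Actor('Joe').LaunchActor()
    qToJoe.put('Generic message')
    qToJoe.put(None)  # stop the actor process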
Edit, explanation:
queue.Queue is only thread-safe, whereas multiprocessing actually spawns another process. That is why the separate multiprocessing.Queue exists: it is implemented to be process-safe as well. As another option, if multithreading is enough for you, the threading library can be used together with queue.Queue: https://docs.python.org/dev/library/threading.html#module-threading
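For completeness, here is a hedged sketch of that threading plus queue.Queue alternative; the worker function name and the None stop signal are hypothetical choices for this example, not part of the original code:

import queue
import threading

def worker(msg_q):
    # Consume messages from the thread-safe queue until a None stop signal arrives
    while True:
        msg = msg_q.get(block=True)
        if msg is None:
            break
        print('got:', msg)

if __name__ == '__main__':
    q = queue.Queue()  # thread-safe, but not process-safe
    t = threading.Thread(target=worker, args=(q,))
    t.start()
    q.put('Generic message')
    q.put(None)        # tell the worker thread to stop
    t.join()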
Additional information:
Following up on your further requirements, another parallelization option is joblib, where the spawned workers can be defined as either processes or threads: https://joblib.readthedocs.io/
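As a rough sketch of what that could look like (square is just a placeholder function; prefer="threads" switches joblib from its default process-based backend to threads):

from joblib import Parallel, delayed

def square(x):
    return x * x

if __name__ == '__main__':
    # n_jobs=2 runs two workers; prefer="threads" uses threads instead of processes
    results = Parallel(n_jobs=2, prefer="threads")(delayed(square)(i) for i in range(8))
    print(results)  # [0, 1, 4, 9, 16, 25, 36, 49]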