use Queue.get() with timeout, not Process.join() for timeout
This was because the threads spawned by multiprocessing.Queue seemed to make Process.join() believe the subprocess had not exited.
parent f2f09bf0ca
commit d454f9b732
1 changed file with 7 additions and 7 deletions
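For reference, a minimal self-contained sketch of the pattern this commit moves to: block on Queue.get() with a timeout and kill the child on queue.Empty, instead of joining with a timeout. _raise_deadline() is not part of this diff, so a plain TimeoutError stands in for it here, and the nested _wrap relies on the fork start method (the Linux default), since nested functions cannot be pickled under spawn:

import multiprocessing, queue, typing

def deadline_process(func: typing.Callable[[], None], seconds: int=10):
    q = multiprocessing.Queue()

    def _wrap(func, q):
        # report [success, result-or-exception] back to the parent
        try:
            q.put([True, func()])
        except Exception as e:
            q.put([False, e])
        q.close()

    p = multiprocessing.Process(target=_wrap, args=(func, q))
    p.start()
    try:
        # wait on the result queue rather than p.join(seconds); per the
        # commit message, the queue's feeder threads could make join()
        # believe the child had not exited
        success, out = q.get(block=True, timeout=seconds)
    except queue.Empty:
        p.kill()  # SIGKILL; Process.kill() exists since Python 3.7
        raise TimeoutError("deadline of %ds exceeded" % seconds)  # stand-in for _raise_deadline()
    if success:
        return out
    raise out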
@@ -1,5 +1,5 @@
-import contextlib, datetime, decimal, enum, io, ipaddress, re, signal
-import threading, typing
+import contextlib, datetime, decimal, enum, io, ipaddress, multiprocessing
+import queue, re, signal, threading, typing
 from src.utils import cli, consts, irc, http, parse, security
 
 class Direction(enum.Enum):
@@ -390,18 +390,18 @@ def deadline_process(func: typing.Callable[[], None], seconds: int=10):
         try:
             q.put([True, func()])
         except Exception as e:
-            print(e)
             q.put([False, e])
+        q.close()
 
     p = multiprocessing.Process(target=_wrap, args=(func, q))
     p.start()
-    p.join(seconds)
 
-    if p.is_alive():
-        p.terminate()
+    try:
+        success, out = q.get(block=True, timeout=seconds)
+    except queue.Empty:
+        p.kill()
         _raise_deadline()
 
-    success, out = q.get(block=False)
     if success:
         return out
     else:
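A rough usage example under the same assumptions (the _sleepy helper and the TimeoutError handler below are illustrative only; the real code raises via _raise_deadline()):

import time

def _sleepy():
    time.sleep(60)

if __name__ == "__main__":
    try:
        deadline_process(_sleepy, seconds=2)  # child is killed after ~2s
    except TimeoutError as e:
        print("timed out:", e)

The __main__ guard matters on spawn-based platforms, where the module is re-imported in the child process.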