Environment:
>>> cat /etc/os-release
NAME="KDE neon"
VERSION="5.25"
ID=neon
ID_LIKE="ubuntu debian"
PRETTY_NAME="KDE neon User - 5.25"
VARIANT="User Edition"
VARIANT_ID=user
VERSION_ID="20.04"
HOME_URL="https://neon.kde.org/"
SUPPORT_URL="https://neon.kde.org/"
BUG_REPORT_URL="https://bugs.kde.org/"
LOGO=start-here-kde-neon
PRIVACY_POLICY_URL="https://www.ubuntu.com/legal/terms-and-policies/privacy-policy"
VERSION_CODENAME=focal
UBUNTU_CODENAME=focal
>>> python3 --version
Python 3.8.10
>>> pip3 freeze
numpy==1.23.1
pkg_resources==0.0.0
Script
import multiprocessing as mp
import numpy as np
import time
from itertools import count
from multiprocessing.shared_memory import SharedMemory
BUFFER_SHAPE = (100, 1080, 1920, 3)
def producer(q: mp.Queue, sm: SharedMemory, steps: int):
    """Write `steps` blank frames into the shared ring buffer, publishing
    each slot index on `q`, then send a None sentinel to stop the consumer.

    Parameters
    ----------
    q : mp.Queue
        Channel carrying ring-buffer slot indices to the consumer.
    sm : SharedMemory
        Shared segment backing the frame ring buffer (BUFFER_SHAPE uint8s).
    steps : int
        Number of frames to produce.
    """
    frame = np.zeros(shape=BUFFER_SHAPE[1:], dtype=np.uint8)
    # ndarray view over the shared segment: writes land directly in shared memory.
    buffer = np.ndarray(BUFFER_SHAPE, dtype=np.uint8, buffer=sm.buf)
    for i in range(steps):
        index = i % BUFFER_SHAPE[0]  # ring-buffer slot for this frame
        # Slice assignment already copies frame's bytes into the shared buffer;
        # the original `frame.copy()` allocated a throwaway ~6 MB array per step.
        buffer[index] = frame
        q.put(index)
        time.sleep(0.042)  # ~24 fps pacing
    q.put(None)  # sentinel: no more frames
def consumer(q: mp.Queue, sm: SharedMemory):
    """Consume slot indices from `q` until the None sentinel arrives,
    timing a mean() over each published frame in the shared ring buffer
    and printing a running average latency every 25 frames.

    Parameters
    ----------
    q : mp.Queue
        Channel delivering ring-buffer slot indices (None terminates).
    sm : SharedMemory
        Shared segment backing the frame ring buffer (BUFFER_SHAPE uint8s).
    """
    latencies = []
    shared = np.ndarray(BUFFER_SHAPE, dtype=np.uint8, buffer=sm.buf)
    # Two-argument iter() yields q.get() results until the None sentinel.
    for i, slot in enumerate(iter(q.get, None)):
        view = shared[slot]
        tic = time.perf_counter()
        _ = view.mean()
        toc = time.perf_counter()
        latencies.append(toc - tic)
        if i % 25 == 0:
            print(f'{i:03d}: Latency: {np.mean(latencies):.4f} sec')
if __name__ == "__main__":
queue = mp.Queue(maxsize=BUFFER_SHAPE[0])
shared_memory = SharedMemory(create=True, size=int(np.prod(BUFFER_SHAPE)))
p1 = mp.Process(target=producer, args=(queue, shared_memory, 500))
p2 = mp.Process(target=consumer, args=(queue, shared_memory))
p1.start()
p2.start()
p1.join()
p2.join()
shared_memory.close()
shared_memory.unlink()
If the line time.sleep(0.042)
in producer()
is commented out, then _ = frame.mean()
in consumer()
becomes faster. Why?
How does time.sleep()
in the producer process affect the speed of accessing data in shared memory from the consumer process?