This second article on parallelising computations describes the use of a queue. Data is delivered to the queue in batches, and each worker (the employee function below) picks up a batch as soon as one is available.

# imports
from multiprocessing import Process, Queue, cpu_count, Manager
import queue
# object initialization
principal = Queue()
result_dict = Manager().dict()
employees = []
n_cpu = cpu_count()
# worker function: takes batches of data from the queue and processes them; when the queue
# has been empty for 1 second, queue.Empty is raised, the partial result is stored in the
# shared dictionary and the process finishes
def employee(index, orders, result_dict):
    result = []
    while True:
        try:
            order = orders.get(timeout=1)
            for row in order:
                result.append(row**(1/2))  # square root of every value in the batch
        except queue.Empty:
            result_dict[index] = result
            break
# creating the worker processes and starting them
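# note: on platforms that use the 'spawn' start method (Windows, and macOS since Python 3.8),
# the process-creating code below has to be placed under an if __name__ == '__main__': guard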
for index in range(n_cpu):
    p = Process(target=employee, args=(index, principal, result_dict))
    employees.append(p)
    p.start()

# adding data batches to the queue
for _ in range(1024):
    job = list(range(0, 10000, 4))
    principal.put(job)

# waiting for the tasks to finish
for i in range(n_cpu):
    employees[i].join()

# getting results from the dictionary
for key, value in result_dict.items():
    print(key, len(value))
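
If a single flat list of values is needed rather than one list per worker, the partial results can be concatenated after all processes have joined. A minimal sketch, reusing the result_dict populated by the script above:

# combining the per-worker result lists into one flat list
# (batches are handed out dynamically, so the overall order is not preserved)
from itertools import chain
all_results = list(chain.from_iterable(result_dict[key] for key in sorted(result_dict.keys())))
print(len(all_results))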