From b0c40a9c1b335fc2a956fc1880a613e97d12938f Mon Sep 17 00:00:00 2001
From: Richard Purdie
Date: Thu, 30 May 2013 12:28:05 +0000
Subject: package: Ensure we iterate all the pool objects

There is the possibility that if we don't iterate through the
multiprocessing pool objects we might not catch return codes, and this
could lead to hung/zombie processes either temporarily or on a wider
scale.

Adding this certainly doesn't hurt anything and is better practice, so
we might as well do it. It's not 100% clear whether this fixes some
issues or not.

(From OE-Core master rev: 89c8493d4d85044cd72af2756569d15e87cd5947)

(From OE-Core rev: e887858d495d772a4b2cd6ca4edc0c53942518d8)

Signed-off-by: Richard Purdie
---
 meta/classes/package.bbclass | 4 ++--
 1 file changed, 2 insertions(+), 2 deletions(-)

diff --git a/meta/classes/package.bbclass b/meta/classes/package.bbclass
index affa34e808..551cafc51a 100644
--- a/meta/classes/package.bbclass
+++ b/meta/classes/package.bbclass
@@ -897,7 +897,7 @@ python split_and_strip_files () {
     import multiprocessing
     nproc = multiprocessing.cpu_count()
     pool = bb.utils.multiprocessingpool(nproc)
-    processed = pool.imap(oe.package.runstrip, sfiles)
+    processed = list(pool.imap(oe.package.runstrip, sfiles))
     pool.close()
     pool.join()
 
@@ -1249,7 +1249,7 @@ python package_do_filedeps() {
     import multiprocessing
     nproc = multiprocessing.cpu_count()
     pool = bb.utils.multiprocessingpool(nproc)
-    processed = pool.imap(oe.package.filedeprunner, pkglist)
+    processed = list(pool.imap(oe.package.filedeprunner, pkglist))
     pool.close()
     pool.join()
 
-- 
cgit v1.2.3-54-g00ecf
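
For context, a minimal standalone sketch of the pattern this patch adopts:
pool.imap() returns a lazy iterator, so if its results are never consumed the
workers' return values (and any exceptions they raise) may never be fetched
before close()/join(). Wrapping the call in list() drains the iterator
eagerly. This sketch uses plain multiprocessing.Pool and a dummy work()
function as stand-ins for bb.utils.multiprocessingpool and
oe.package.runstrip/oe.package.filedeprunner; it is illustrative only, not
OE-Core code.

    import multiprocessing

    def work(item):
        # Dummy stand-in for oe.package.runstrip / oe.package.filedeprunner.
        return item * 2

    if __name__ == "__main__":
        nproc = multiprocessing.cpu_count()
        # Stand-in for bb.utils.multiprocessingpool(nproc).
        pool = multiprocessing.Pool(nproc)

        # pool.imap() is lazy; left unconsumed, results and worker exceptions
        # can be silently dropped. list() forces every result to be collected
        # and re-raises any exception from a worker here, as the patch does.
        processed = list(pool.imap(work, range(8)))

        pool.close()
        pool.join()
        print(processed)  # [0, 2, 4, 6, 8, 10, 12, 14]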