author      Richard Purdie <richard.purdie@linuxfoundation.org>    2015-06-01 22:16:17 +0100
committer   Richard Purdie <richard.purdie@linuxfoundation.org>    2015-06-03 16:38:45 +0100
commit      1f839f233ebe46421ada54d88a37713ea0a6bcad (patch)
tree        af2342a379d981980f57f0b4ea4f6b1eee99ce08
parent      5331bbc1ac797e7503dd7fcf9780b0eab7274fc7 (diff)
download    poky-1f839f233ebe46421ada54d88a37713ea0a6bcad.tar.gz
sstate: Parallelise checkstatus calls for sstate mirror
Currently the URLs are checked serially, which is a performance bottleneck, particularly
for http:// URLs. This adds code to check the URL status in parallel, mirroring the way
we do this elsewhere.

We need the datastore for the fetcher, so we use threads rather than multiprocessing.
(From OE-Core rev: 77c4865bbde4cd2a061cf333f9ad798afc6de0ef)
Signed-off-by: Richard Purdie <richard.purdie@linuxfoundation.org>
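
[Editorial aside, not part of the commit] A minimal sketch of the idea described above: run the per-URL
status checks concurrently in threads, which can share in-process state (for sstate, the BitBake
datastore), rather than in separate processes. The check_mirror_urls() and check_url() names below are
hypothetical; check_url() stands in for bb.fetch2's fetcher.checkstatus(), and the actual change uses
oe.utils.ThreadedPool as shown in the diff.

# Illustrative sketch only, not the commit's code. check_url() is a
# hypothetical helper that raises an exception when the URL is missing,
# standing in for fetcher.checkstatus().
import multiprocessing
from concurrent.futures import ThreadPoolExecutor

def check_mirror_urls(urls):
    found, missed = [], []

    def checkstatus(url):
        try:
            check_url(url)        # hypothetical; raises if the URL is absent
            found.append(url)     # list.append is thread-safe under the GIL
        except Exception:
            missed.append(url)

    if urls:
        # Threads rather than processes: the workers need to share
        # in-process state, analogous to sharing the datastore here.
        nproc = min(multiprocessing.cpu_count(), len(urls))
        with ThreadPoolExecutor(max_workers=nproc) as pool:
            list(pool.map(checkstatus, urls))
    return found, missed
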
-rw-r--r--  meta/classes/sstate.bbclass | 28
1 file changed, 20 insertions(+), 8 deletions(-)
diff --git a/meta/classes/sstate.bbclass b/meta/classes/sstate.bbclass
index b48504429f..de3519a69e 100644
--- a/meta/classes/sstate.bbclass
+++ b/meta/classes/sstate.bbclass
@@ -715,20 +715,16 @@ def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False):
         if localdata.getVar('BB_NO_NETWORK', True) == "1" and localdata.getVar('SSTATE_MIRROR_ALLOW_NETWORK', True) == "1":
             localdata.delVar('BB_NO_NETWORK')
 
-        for task in range(len(sq_fn)):
-            if task in ret:
-                continue
-
-            spec, extrapath, tname = getpathcomponents(task, d)
-
-            sstatefile = d.expand(extrapath + generate_sstatefn(spec, sq_hash[task], d) + "_" + tname + extension)
+        def checkstatus(arg):
+            (task, sstatefile) = arg
 
+            localdata2 = bb.data.createCopy(localdata)
             srcuri = "file://" + sstatefile
             localdata.setVar('SRC_URI', srcuri)
             bb.debug(2, "SState: Attempting to fetch %s" % srcuri)
 
             try:
-                fetcher = bb.fetch2.Fetch(srcuri.split(), localdata)
+                fetcher = bb.fetch2.Fetch(srcuri.split(), localdata2)
                 fetcher.checkstatus()
                 bb.debug(2, "SState: Successful fetch test for %s" % srcuri)
                 ret.append(task)
@@ -739,6 +735,22 @@ def sstate_checkhashes(sq_fn, sq_task, sq_hash, sq_hashfn, d, siginfo=False):
                 bb.debug(2, "SState: Unsuccessful fetch test for %s" % srcuri)
                 pass
 
+        tasklist = []
+        for task in range(len(sq_fn)):
+            if task in ret:
+                continue
+            spec, extrapath, tname = getpathcomponents(task, d)
+            sstatefile = d.expand(extrapath + generate_sstatefn(spec, sq_hash[task], d) + "_" + tname + extension)
+            tasklist.append((task, sstatefile))
+
+        if tasklist:
+            import multiprocessing
+            nproc = min(multiprocessing.cpu_count(), len(tasklist))
+            pool = oe.utils.ThreadedPool(nproc)
+            for t in tasklist:
+                pool.add_task(checkstatus, t)
+            pool.wait_completion()
+
     inheritlist = d.getVar("INHERIT", True)
     if "toaster" in inheritlist:
         evdata = {'missed': [], 'found': []};
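
[Editorial aside, not part of the commit] For readers unfamiliar with oe.utils.ThreadedPool, below is a
rough sketch of what a pool with this add_task()/wait_completion() interface typically looks like:
worker threads draining a task queue. This is an assumption about the general pattern, not the actual
oe.utils.ThreadedPool source; the class name SimpleThreadedPool is invented for illustration.

# Generic sketch of a queue-backed thread pool with the add_task() /
# wait_completion() contract used by the diff above.
import threading
try:
    import queue             # Python 3
except ImportError:
    import Queue as queue    # Python 2

class SimpleThreadedPool:
    def __init__(self, num_workers):
        self.tasks = queue.Queue()
        for _ in range(num_workers):
            t = threading.Thread(target=self._worker)
            t.daemon = True
            t.start()

    def _worker(self):
        # Each worker loops forever, pulling tasks off the shared queue.
        while True:
            func, args = self.tasks.get()
            try:
                func(*args)
            except Exception:
                pass  # keep the worker alive; real code would log this
            finally:
                self.tasks.task_done()

    def add_task(self, func, *args):
        self.tasks.put((func, args))

    def wait_completion(self):
        # Block until every queued task has been processed.
        self.tasks.join()

Usage mirrors the diff: pool = SimpleThreadedPool(nproc); pool.add_task(checkstatus, t) for each queued
(task, sstatefile) pair; pool.wait_completion() before reading the shared ret/missed lists.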