From c527fd1f14c27855a37f2e8ac5346ce8d940ced2 Mon Sep 17 00:00:00 2001 From: Tudor Florea Date: Thu, 16 Oct 2014 03:05:19 +0200 Subject: initial commit for Enea Linux 4.0-140929 Migrated from the internal git server on the daisy-enea-point-release branch Signed-off-by: Tudor Florea --- bitbake/lib/bb/COW.py | 323 +++ bitbake/lib/bb/__init__.py | 143 ++ bitbake/lib/bb/build.py | 709 +++++++ bitbake/lib/bb/cache.py | 847 ++++++++ bitbake/lib/bb/cache_extra.py | 75 + bitbake/lib/bb/checksum.py | 90 + bitbake/lib/bb/codeparser.py | 328 +++ bitbake/lib/bb/command.py | 444 ++++ bitbake/lib/bb/compat.py | 6 + bitbake/lib/bb/cooker.py | 1874 +++++++++++++++++ bitbake/lib/bb/cookerdata.py | 305 +++ bitbake/lib/bb/daemonize.py | 190 ++ bitbake/lib/bb/data.py | 403 ++++ bitbake/lib/bb/data_smart.py | 804 ++++++++ bitbake/lib/bb/event.py | 641 ++++++ bitbake/lib/bb/exceptions.py | 91 + bitbake/lib/bb/fetch2/__init__.py | 1575 ++++++++++++++ bitbake/lib/bb/fetch2/bzr.py | 143 ++ bitbake/lib/bb/fetch2/cvs.py | 171 ++ bitbake/lib/bb/fetch2/git.py | 355 ++++ bitbake/lib/bb/fetch2/gitannex.py | 76 + bitbake/lib/bb/fetch2/gitsm.py | 126 ++ bitbake/lib/bb/fetch2/hg.py | 187 ++ bitbake/lib/bb/fetch2/local.py | 116 ++ bitbake/lib/bb/fetch2/osc.py | 135 ++ bitbake/lib/bb/fetch2/perforce.py | 194 ++ bitbake/lib/bb/fetch2/repo.py | 98 + bitbake/lib/bb/fetch2/sftp.py | 129 ++ bitbake/lib/bb/fetch2/ssh.py | 127 ++ bitbake/lib/bb/fetch2/svn.py | 191 ++ bitbake/lib/bb/fetch2/wget.py | 106 + bitbake/lib/bb/methodpool.py | 29 + bitbake/lib/bb/monitordisk.py | 265 +++ bitbake/lib/bb/msg.py | 196 ++ bitbake/lib/bb/namedtuple_with_abc.py | 255 +++ bitbake/lib/bb/parse/__init__.py | 157 ++ bitbake/lib/bb/parse/ast.py | 478 +++++ bitbake/lib/bb/parse/parse_py/BBHandler.py | 267 +++ bitbake/lib/bb/parse/parse_py/ConfHandler.py | 189 ++ bitbake/lib/bb/parse/parse_py/__init__.py | 33 + bitbake/lib/bb/persist_data.py | 215 ++ bitbake/lib/bb/process.py | 133 ++ bitbake/lib/bb/providers.py | 381 ++++ 
bitbake/lib/bb/pysh/__init__.py | 0 bitbake/lib/bb/pysh/builtin.py | 710 +++++++ bitbake/lib/bb/pysh/interp.py | 1367 +++++++++++++ bitbake/lib/bb/pysh/lsprof.py | 116 ++ bitbake/lib/bb/pysh/pysh.py | 167 ++ bitbake/lib/bb/pysh/pyshlex.py | 888 ++++++++ bitbake/lib/bb/pysh/pyshyacc.py | 779 +++++++ bitbake/lib/bb/pysh/sherrors.py | 41 + bitbake/lib/bb/pysh/subprocess_fix.py | 77 + bitbake/lib/bb/runqueue.py | 2154 ++++++++++++++++++++ bitbake/lib/bb/server/__init__.py | 96 + bitbake/lib/bb/server/process.py | 236 +++ bitbake/lib/bb/server/xmlrpc.py | 392 ++++ bitbake/lib/bb/shell.py | 820 ++++++++ bitbake/lib/bb/siggen.py | 483 +++++ bitbake/lib/bb/taskdata.py | 651 ++++++ bitbake/lib/bb/tests/__init__.py | 0 bitbake/lib/bb/tests/codeparser.py | 375 ++++ bitbake/lib/bb/tests/cow.py | 136 ++ bitbake/lib/bb/tests/data.py | 296 +++ bitbake/lib/bb/tests/fetch.py | 562 +++++ bitbake/lib/bb/tests/utils.py | 53 + bitbake/lib/bb/tinfoil.py | 96 + bitbake/lib/bb/ui/__init__.py | 17 + bitbake/lib/bb/ui/buildinfohelper.py | 964 +++++++++ bitbake/lib/bb/ui/crumbs/__init__.py | 17 + bitbake/lib/bb/ui/crumbs/builddetailspage.py | 437 ++++ bitbake/lib/bb/ui/crumbs/builder.py | 1475 ++++++++++++++ bitbake/lib/bb/ui/crumbs/buildmanager.py | 455 +++++ bitbake/lib/bb/ui/crumbs/hig/__init__.py | 0 .../lib/bb/ui/crumbs/hig/advancedsettingsdialog.py | 341 ++++ bitbake/lib/bb/ui/crumbs/hig/crumbsdialog.py | 44 + .../lib/bb/ui/crumbs/hig/crumbsmessagedialog.py | 70 + bitbake/lib/bb/ui/crumbs/hig/deployimagedialog.py | 219 ++ .../lib/bb/ui/crumbs/hig/imageselectiondialog.py | 172 ++ .../lib/bb/ui/crumbs/hig/layerselectiondialog.py | 298 +++ .../lib/bb/ui/crumbs/hig/parsingwarningsdialog.py | 163 ++ bitbake/lib/bb/ui/crumbs/hig/propertydialog.py | 437 ++++ bitbake/lib/bb/ui/crumbs/hig/proxydetailsdialog.py | 90 + .../lib/bb/ui/crumbs/hig/retrieveimagedialog.py | 51 + bitbake/lib/bb/ui/crumbs/hig/saveimagedialog.py | 159 ++ bitbake/lib/bb/ui/crumbs/hig/settingsuihelper.py | 122 ++ 
.../lib/bb/ui/crumbs/hig/simplesettingsdialog.py | 894 ++++++++ bitbake/lib/bb/ui/crumbs/hobcolor.py | 38 + bitbake/lib/bb/ui/crumbs/hobeventhandler.py | 639 ++++++ bitbake/lib/bb/ui/crumbs/hoblistmodel.py | 903 ++++++++ bitbake/lib/bb/ui/crumbs/hobpages.py | 128 ++ bitbake/lib/bb/ui/crumbs/hobwidget.py | 904 ++++++++ bitbake/lib/bb/ui/crumbs/imageconfigurationpage.py | 561 +++++ bitbake/lib/bb/ui/crumbs/imagedetailspage.py | 669 ++++++ bitbake/lib/bb/ui/crumbs/packageselectionpage.py | 355 ++++ bitbake/lib/bb/ui/crumbs/persistenttooltip.py | 186 ++ bitbake/lib/bb/ui/crumbs/progress.py | 23 + bitbake/lib/bb/ui/crumbs/progressbar.py | 59 + bitbake/lib/bb/ui/crumbs/puccho.glade | 606 ++++++ bitbake/lib/bb/ui/crumbs/recipeselectionpage.py | 335 +++ bitbake/lib/bb/ui/crumbs/runningbuild.py | 551 +++++ bitbake/lib/bb/ui/crumbs/sanitycheckpage.py | 85 + bitbake/lib/bb/ui/crumbs/utils.py | 34 + bitbake/lib/bb/ui/depexp.py | 326 +++ bitbake/lib/bb/ui/goggle.py | 121 ++ bitbake/lib/bb/ui/hob.py | 109 + bitbake/lib/bb/ui/icons/images/images_display.png | Bin 0 -> 6898 bytes bitbake/lib/bb/ui/icons/images/images_hover.png | Bin 0 -> 7051 bytes bitbake/lib/bb/ui/icons/indicators/add-hover.png | Bin 0 -> 1212 bytes bitbake/lib/bb/ui/icons/indicators/add.png | Bin 0 -> 1176 bytes bitbake/lib/bb/ui/icons/indicators/alert.png | Bin 0 -> 3954 bytes .../lib/bb/ui/icons/indicators/confirmation.png | Bin 0 -> 5789 bytes bitbake/lib/bb/ui/icons/indicators/denied.png | Bin 0 -> 3955 bytes bitbake/lib/bb/ui/icons/indicators/error.png | Bin 0 -> 6482 bytes bitbake/lib/bb/ui/icons/indicators/info.png | Bin 0 -> 3311 bytes bitbake/lib/bb/ui/icons/indicators/issues.png | Bin 0 -> 4549 bytes bitbake/lib/bb/ui/icons/indicators/refresh.png | Bin 0 -> 5250 bytes .../lib/bb/ui/icons/indicators/remove-hover.png | Bin 0 -> 2809 bytes bitbake/lib/bb/ui/icons/indicators/remove.png | Bin 0 -> 1971 bytes bitbake/lib/bb/ui/icons/indicators/tick.png | Bin 0 -> 4563 bytes 
bitbake/lib/bb/ui/icons/info/info_display.png | Bin 0 -> 4117 bytes bitbake/lib/bb/ui/icons/info/info_hover.png | Bin 0 -> 4167 bytes bitbake/lib/bb/ui/icons/layers/layers_display.png | Bin 0 -> 4840 bytes bitbake/lib/bb/ui/icons/layers/layers_hover.png | Bin 0 -> 5257 bytes .../lib/bb/ui/icons/packages/packages_display.png | Bin 0 -> 7011 bytes .../lib/bb/ui/icons/packages/packages_hover.png | Bin 0 -> 7121 bytes bitbake/lib/bb/ui/icons/recipe/recipe_display.png | Bin 0 -> 4723 bytes bitbake/lib/bb/ui/icons/recipe/recipe_hover.png | Bin 0 -> 4866 bytes .../lib/bb/ui/icons/settings/settings_display.png | Bin 0 -> 6076 bytes .../lib/bb/ui/icons/settings/settings_hover.png | Bin 0 -> 6269 bytes .../bb/ui/icons/templates/templates_display.png | Bin 0 -> 5651 bytes .../lib/bb/ui/icons/templates/templates_hover.png | Bin 0 -> 5791 bytes bitbake/lib/bb/ui/knotty.py | 550 +++++ bitbake/lib/bb/ui/ncurses.py | 373 ++++ bitbake/lib/bb/ui/puccho.py | 425 ++++ bitbake/lib/bb/ui/toasterui.py | 292 +++ bitbake/lib/bb/ui/uievent.py | 133 ++ bitbake/lib/bb/ui/uihelper.py | 100 + bitbake/lib/bb/utils.py | 878 ++++++++ 138 files changed, 39903 insertions(+) create mode 100644 bitbake/lib/bb/COW.py create mode 100644 bitbake/lib/bb/__init__.py create mode 100644 bitbake/lib/bb/build.py create mode 100644 bitbake/lib/bb/cache.py create mode 100644 bitbake/lib/bb/cache_extra.py create mode 100644 bitbake/lib/bb/checksum.py create mode 100644 bitbake/lib/bb/codeparser.py create mode 100644 bitbake/lib/bb/command.py create mode 100644 bitbake/lib/bb/compat.py create mode 100644 bitbake/lib/bb/cooker.py create mode 100644 bitbake/lib/bb/cookerdata.py create mode 100644 bitbake/lib/bb/daemonize.py create mode 100644 bitbake/lib/bb/data.py create mode 100644 bitbake/lib/bb/data_smart.py create mode 100644 bitbake/lib/bb/event.py create mode 100644 bitbake/lib/bb/exceptions.py create mode 100644 bitbake/lib/bb/fetch2/__init__.py create mode 100644 bitbake/lib/bb/fetch2/bzr.py create mode 
100644 bitbake/lib/bb/fetch2/cvs.py create mode 100644 bitbake/lib/bb/fetch2/git.py create mode 100644 bitbake/lib/bb/fetch2/gitannex.py create mode 100644 bitbake/lib/bb/fetch2/gitsm.py create mode 100644 bitbake/lib/bb/fetch2/hg.py create mode 100644 bitbake/lib/bb/fetch2/local.py create mode 100644 bitbake/lib/bb/fetch2/osc.py create mode 100644 bitbake/lib/bb/fetch2/perforce.py create mode 100644 bitbake/lib/bb/fetch2/repo.py create mode 100644 bitbake/lib/bb/fetch2/sftp.py create mode 100644 bitbake/lib/bb/fetch2/ssh.py create mode 100644 bitbake/lib/bb/fetch2/svn.py create mode 100644 bitbake/lib/bb/fetch2/wget.py create mode 100644 bitbake/lib/bb/methodpool.py create mode 100644 bitbake/lib/bb/monitordisk.py create mode 100644 bitbake/lib/bb/msg.py create mode 100644 bitbake/lib/bb/namedtuple_with_abc.py create mode 100644 bitbake/lib/bb/parse/__init__.py create mode 100644 bitbake/lib/bb/parse/ast.py create mode 100644 bitbake/lib/bb/parse/parse_py/BBHandler.py create mode 100644 bitbake/lib/bb/parse/parse_py/ConfHandler.py create mode 100644 bitbake/lib/bb/parse/parse_py/__init__.py create mode 100644 bitbake/lib/bb/persist_data.py create mode 100644 bitbake/lib/bb/process.py create mode 100644 bitbake/lib/bb/providers.py create mode 100644 bitbake/lib/bb/pysh/__init__.py create mode 100644 bitbake/lib/bb/pysh/builtin.py create mode 100644 bitbake/lib/bb/pysh/interp.py create mode 100644 bitbake/lib/bb/pysh/lsprof.py create mode 100644 bitbake/lib/bb/pysh/pysh.py create mode 100644 bitbake/lib/bb/pysh/pyshlex.py create mode 100644 bitbake/lib/bb/pysh/pyshyacc.py create mode 100644 bitbake/lib/bb/pysh/sherrors.py create mode 100644 bitbake/lib/bb/pysh/subprocess_fix.py create mode 100644 bitbake/lib/bb/runqueue.py create mode 100644 bitbake/lib/bb/server/__init__.py create mode 100644 bitbake/lib/bb/server/process.py create mode 100644 bitbake/lib/bb/server/xmlrpc.py create mode 100644 bitbake/lib/bb/shell.py create mode 100644 bitbake/lib/bb/siggen.py 
create mode 100644 bitbake/lib/bb/taskdata.py create mode 100644 bitbake/lib/bb/tests/__init__.py create mode 100644 bitbake/lib/bb/tests/codeparser.py create mode 100644 bitbake/lib/bb/tests/cow.py create mode 100644 bitbake/lib/bb/tests/data.py create mode 100644 bitbake/lib/bb/tests/fetch.py create mode 100644 bitbake/lib/bb/tests/utils.py create mode 100644 bitbake/lib/bb/tinfoil.py create mode 100644 bitbake/lib/bb/ui/__init__.py create mode 100644 bitbake/lib/bb/ui/buildinfohelper.py create mode 100644 bitbake/lib/bb/ui/crumbs/__init__.py create mode 100755 bitbake/lib/bb/ui/crumbs/builddetailspage.py create mode 100755 bitbake/lib/bb/ui/crumbs/builder.py create mode 100644 bitbake/lib/bb/ui/crumbs/buildmanager.py create mode 100644 bitbake/lib/bb/ui/crumbs/hig/__init__.py create mode 100644 bitbake/lib/bb/ui/crumbs/hig/advancedsettingsdialog.py create mode 100644 bitbake/lib/bb/ui/crumbs/hig/crumbsdialog.py create mode 100644 bitbake/lib/bb/ui/crumbs/hig/crumbsmessagedialog.py create mode 100644 bitbake/lib/bb/ui/crumbs/hig/deployimagedialog.py create mode 100644 bitbake/lib/bb/ui/crumbs/hig/imageselectiondialog.py create mode 100644 bitbake/lib/bb/ui/crumbs/hig/layerselectiondialog.py create mode 100644 bitbake/lib/bb/ui/crumbs/hig/parsingwarningsdialog.py create mode 100644 bitbake/lib/bb/ui/crumbs/hig/propertydialog.py create mode 100644 bitbake/lib/bb/ui/crumbs/hig/proxydetailsdialog.py create mode 100644 bitbake/lib/bb/ui/crumbs/hig/retrieveimagedialog.py create mode 100644 bitbake/lib/bb/ui/crumbs/hig/saveimagedialog.py create mode 100644 bitbake/lib/bb/ui/crumbs/hig/settingsuihelper.py create mode 100644 bitbake/lib/bb/ui/crumbs/hig/simplesettingsdialog.py create mode 100644 bitbake/lib/bb/ui/crumbs/hobcolor.py create mode 100644 bitbake/lib/bb/ui/crumbs/hobeventhandler.py create mode 100644 bitbake/lib/bb/ui/crumbs/hoblistmodel.py create mode 100755 bitbake/lib/bb/ui/crumbs/hobpages.py create mode 100644 bitbake/lib/bb/ui/crumbs/hobwidget.py create 
mode 100644 bitbake/lib/bb/ui/crumbs/imageconfigurationpage.py create mode 100755 bitbake/lib/bb/ui/crumbs/imagedetailspage.py create mode 100755 bitbake/lib/bb/ui/crumbs/packageselectionpage.py create mode 100644 bitbake/lib/bb/ui/crumbs/persistenttooltip.py create mode 100644 bitbake/lib/bb/ui/crumbs/progress.py create mode 100644 bitbake/lib/bb/ui/crumbs/progressbar.py create mode 100644 bitbake/lib/bb/ui/crumbs/puccho.glade create mode 100755 bitbake/lib/bb/ui/crumbs/recipeselectionpage.py create mode 100644 bitbake/lib/bb/ui/crumbs/runningbuild.py create mode 100644 bitbake/lib/bb/ui/crumbs/sanitycheckpage.py create mode 100644 bitbake/lib/bb/ui/crumbs/utils.py create mode 100644 bitbake/lib/bb/ui/depexp.py create mode 100644 bitbake/lib/bb/ui/goggle.py create mode 100755 bitbake/lib/bb/ui/hob.py create mode 100644 bitbake/lib/bb/ui/icons/images/images_display.png create mode 100644 bitbake/lib/bb/ui/icons/images/images_hover.png create mode 100644 bitbake/lib/bb/ui/icons/indicators/add-hover.png create mode 100644 bitbake/lib/bb/ui/icons/indicators/add.png create mode 100644 bitbake/lib/bb/ui/icons/indicators/alert.png create mode 100644 bitbake/lib/bb/ui/icons/indicators/confirmation.png create mode 100644 bitbake/lib/bb/ui/icons/indicators/denied.png create mode 100644 bitbake/lib/bb/ui/icons/indicators/error.png create mode 100644 bitbake/lib/bb/ui/icons/indicators/info.png create mode 100644 bitbake/lib/bb/ui/icons/indicators/issues.png create mode 100644 bitbake/lib/bb/ui/icons/indicators/refresh.png create mode 100644 bitbake/lib/bb/ui/icons/indicators/remove-hover.png create mode 100644 bitbake/lib/bb/ui/icons/indicators/remove.png create mode 100644 bitbake/lib/bb/ui/icons/indicators/tick.png create mode 100644 bitbake/lib/bb/ui/icons/info/info_display.png create mode 100644 bitbake/lib/bb/ui/icons/info/info_hover.png create mode 100644 bitbake/lib/bb/ui/icons/layers/layers_display.png create mode 100644 bitbake/lib/bb/ui/icons/layers/layers_hover.png 
create mode 100644 bitbake/lib/bb/ui/icons/packages/packages_display.png create mode 100644 bitbake/lib/bb/ui/icons/packages/packages_hover.png create mode 100644 bitbake/lib/bb/ui/icons/recipe/recipe_display.png create mode 100644 bitbake/lib/bb/ui/icons/recipe/recipe_hover.png create mode 100644 bitbake/lib/bb/ui/icons/settings/settings_display.png create mode 100644 bitbake/lib/bb/ui/icons/settings/settings_hover.png create mode 100644 bitbake/lib/bb/ui/icons/templates/templates_display.png create mode 100644 bitbake/lib/bb/ui/icons/templates/templates_hover.png create mode 100644 bitbake/lib/bb/ui/knotty.py create mode 100644 bitbake/lib/bb/ui/ncurses.py create mode 100644 bitbake/lib/bb/ui/puccho.py create mode 100644 bitbake/lib/bb/ui/toasterui.py create mode 100644 bitbake/lib/bb/ui/uievent.py create mode 100644 bitbake/lib/bb/ui/uihelper.py create mode 100644 bitbake/lib/bb/utils.py (limited to 'bitbake/lib/bb') diff --git a/bitbake/lib/bb/COW.py b/bitbake/lib/bb/COW.py new file mode 100644 index 0000000000..6917ec378a --- /dev/null +++ b/bitbake/lib/bb/COW.py @@ -0,0 +1,323 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +# +# This is a copy on write dictionary and set which abuses classes to try and be nice and fast. +# +# Copyright (C) 2006 Tim Amsell +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+# +#Please Note: +# Be careful when using mutable types (ie Dict and Lists) - operations involving these are SLOW. +# Assign a file to __warn__ to get warnings about slow operations. +# + +from __future__ import print_function +import copy +import types +ImmutableTypes = ( + types.NoneType, + bool, + complex, + float, + int, + long, + tuple, + frozenset, + basestring +) + +MUTABLE = "__mutable__" + +class COWMeta(type): + pass + +class COWDictMeta(COWMeta): + __warn__ = False + __hasmutable__ = False + __marker__ = tuple() + + def __str__(cls): + # FIXME: I have magic numbers! + return "" % (cls.__count__, len(cls.__dict__) - 3) + __repr__ = __str__ + + def cow(cls): + class C(cls): + __count__ = cls.__count__ + 1 + return C + copy = cow + __call__ = cow + + def __setitem__(cls, key, value): + if not isinstance(value, ImmutableTypes): + if not isinstance(value, COWMeta): + cls.__hasmutable__ = True + key += MUTABLE + setattr(cls, key, value) + + def __getmutable__(cls, key, readonly=False): + nkey = key + MUTABLE + try: + return cls.__dict__[nkey] + except KeyError: + pass + + value = getattr(cls, nkey) + if readonly: + return value + + if not cls.__warn__ is False and not isinstance(value, COWMeta): + print("Warning: Doing a copy because %s is a mutable type." % key, file=cls.__warn__) + try: + value = value.copy() + except AttributeError as e: + value = copy.copy(value) + setattr(cls, nkey, value) + return value + + __getmarker__ = [] + def __getreadonly__(cls, key, default=__getmarker__): + """\ + Get a value (even if mutable) which you promise not to change. + """ + return cls.__getitem__(key, default, True) + + def __getitem__(cls, key, default=__getmarker__, readonly=False): + try: + try: + value = getattr(cls, key) + except AttributeError: + value = cls.__getmutable__(key, readonly) + + # This is for values which have been deleted + if value is cls.__marker__: + raise AttributeError("key %s does not exist." 
% key) + + return value + except AttributeError as e: + if not default is cls.__getmarker__: + return default + + raise KeyError(str(e)) + + def __delitem__(cls, key): + cls.__setitem__(key, cls.__marker__) + + def __revertitem__(cls, key): + if not cls.__dict__.has_key(key): + key += MUTABLE + delattr(cls, key) + + def __contains__(cls, key): + return cls.has_key(key) + + def has_key(cls, key): + value = cls.__getreadonly__(key, cls.__marker__) + if value is cls.__marker__: + return False + return True + + def iter(cls, type, readonly=False): + for key in dir(cls): + if key.startswith("__"): + continue + + if key.endswith(MUTABLE): + key = key[:-len(MUTABLE)] + + if type == "keys": + yield key + + try: + if readonly: + value = cls.__getreadonly__(key) + else: + value = cls[key] + except KeyError: + continue + + if type == "values": + yield value + if type == "items": + yield (key, value) + raise StopIteration() + + def iterkeys(cls): + return cls.iter("keys") + def itervalues(cls, readonly=False): + if not cls.__warn__ is False and cls.__hasmutable__ and readonly is False: + print("Warning: If you arn't going to change any of the values call with True.", file=cls.__warn__) + return cls.iter("values", readonly) + def iteritems(cls, readonly=False): + if not cls.__warn__ is False and cls.__hasmutable__ and readonly is False: + print("Warning: If you arn't going to change any of the values call with True.", file=cls.__warn__) + return cls.iter("items", readonly) + +class COWSetMeta(COWDictMeta): + def __str__(cls): + # FIXME: I have magic numbers! 
+ return "" % (cls.__count__, len(cls.__dict__) -3) + __repr__ = __str__ + + def cow(cls): + class C(cls): + __count__ = cls.__count__ + 1 + return C + + def add(cls, value): + COWDictMeta.__setitem__(cls, repr(hash(value)), value) + + def remove(cls, value): + COWDictMeta.__delitem__(cls, repr(hash(value))) + + def __in__(cls, value): + return COWDictMeta.has_key(repr(hash(value))) + + def iterkeys(cls): + raise TypeError("sets don't have keys") + + def iteritems(cls): + raise TypeError("sets don't have 'items'") + +# These are the actual classes you use! +class COWDictBase(object): + __metaclass__ = COWDictMeta + __count__ = 0 + +class COWSetBase(object): + __metaclass__ = COWSetMeta + __count__ = 0 + +if __name__ == "__main__": + import sys + COWDictBase.__warn__ = sys.stderr + a = COWDictBase() + print("a", a) + + a['a'] = 'a' + a['b'] = 'b' + a['dict'] = {} + + b = a.copy() + print("b", b) + b['c'] = 'b' + + print() + + print("a", a) + for x in a.iteritems(): + print(x) + print("--") + print("b", b) + for x in b.iteritems(): + print(x) + print() + + b['dict']['a'] = 'b' + b['a'] = 'c' + + print("a", a) + for x in a.iteritems(): + print(x) + print("--") + print("b", b) + for x in b.iteritems(): + print(x) + print() + + try: + b['dict2'] + except KeyError as e: + print("Okay!") + + a['set'] = COWSetBase() + a['set'].add("o1") + a['set'].add("o1") + a['set'].add("o2") + + print("a", a) + for x in a['set'].itervalues(): + print(x) + print("--") + print("b", b) + for x in b['set'].itervalues(): + print(x) + print() + + b['set'].add('o3') + + print("a", a) + for x in a['set'].itervalues(): + print(x) + print("--") + print("b", b) + for x in b['set'].itervalues(): + print(x) + print() + + a['set2'] = set() + a['set2'].add("o1") + a['set2'].add("o1") + a['set2'].add("o2") + + print("a", a) + for x in a.iteritems(): + print(x) + print("--") + print("b", b) + for x in b.iteritems(readonly=True): + print(x) + print() + + del b['b'] + try: + print(b['b']) + except 
KeyError: + print("Yay! deleted key raises error") + + if b.has_key('b'): + print("Boo!") + else: + print("Yay - has_key with delete works!") + + print("a", a) + for x in a.iteritems(): + print(x) + print("--") + print("b", b) + for x in b.iteritems(readonly=True): + print(x) + print() + + b.__revertitem__('b') + + print("a", a) + for x in a.iteritems(): + print(x) + print("--") + print("b", b) + for x in b.iteritems(readonly=True): + print(x) + print() + + b.__revertitem__('dict') + print("a", a) + for x in a.iteritems(): + print(x) + print("--") + print("b", b) + for x in b.iteritems(readonly=True): + print(x) + print() diff --git a/bitbake/lib/bb/__init__.py b/bitbake/lib/bb/__init__.py new file mode 100644 index 0000000000..30a974582c --- /dev/null +++ b/bitbake/lib/bb/__init__.py @@ -0,0 +1,143 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +# +# BitBake Build System Python Library +# +# Copyright (C) 2003 Holger Schurig +# Copyright (C) 2003, 2004 Chris Larson +# +# Based on Gentoo's portage.py. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ +__version__ = "1.23.0" + +import sys +if sys.version_info < (2, 7, 3): + raise RuntimeError("Sorry, python 2.7.3 or later is required for this version of bitbake") + + +class BBHandledException(Exception): + """ + The big dilemma for generic bitbake code is what information to give the user + when an exception occurs. Any exception inheriting this base exception class + has already provided information to the user via some 'fired' message type such as + an explicitly fired event using bb.fire, or a bb.error message. If bitbake + encounters an exception derived from this class, no backtrace or other information + will be given to the user, its assumed the earlier event provided the relevant information. + """ + pass + +import os +import logging + + +class NullHandler(logging.Handler): + def emit(self, record): + pass + +Logger = logging.getLoggerClass() +class BBLogger(Logger): + def __init__(self, name): + if name.split(".")[0] == "BitBake": + self.debug = self.bbdebug + Logger.__init__(self, name) + + def bbdebug(self, level, msg, *args, **kwargs): + return self.log(logging.DEBUG - level + 1, msg, *args, **kwargs) + + def plain(self, msg, *args, **kwargs): + return self.log(logging.INFO + 1, msg, *args, **kwargs) + + def verbose(self, msg, *args, **kwargs): + return self.log(logging.INFO - 1, msg, *args, **kwargs) + +logging.raiseExceptions = False +logging.setLoggerClass(BBLogger) + +logger = logging.getLogger("BitBake") +logger.addHandler(NullHandler()) +logger.setLevel(logging.DEBUG - 2) + +# This has to be imported after the setLoggerClass, as the import of bb.msg +# can result in construction of the various loggers. 
+import bb.msg + +from bb import fetch2 as fetch +sys.modules['bb.fetch'] = sys.modules['bb.fetch2'] + +# Messaging convenience functions +def plain(*args): + logger.plain(''.join(args)) + +def debug(lvl, *args): + if isinstance(lvl, basestring): + logger.warn("Passed invalid debug level '%s' to bb.debug", lvl) + args = (lvl,) + args + lvl = 1 + logger.debug(lvl, ''.join(args)) + +def note(*args): + logger.info(''.join(args)) + +def warn(*args): + logger.warn(''.join(args)) + +def error(*args): + logger.error(''.join(args)) + +def fatal(*args): + logger.critical(''.join(args)) + sys.exit(1) + + +def deprecated(func, name=None, advice=""): + """This is a decorator which can be used to mark functions + as deprecated. It will result in a warning being emmitted + when the function is used.""" + import warnings + + if advice: + advice = ": %s" % advice + if name is None: + name = func.__name__ + + def newFunc(*args, **kwargs): + warnings.warn("Call to deprecated function %s%s." % (name, + advice), + category=DeprecationWarning, + stacklevel=2) + return func(*args, **kwargs) + newFunc.__name__ = func.__name__ + newFunc.__doc__ = func.__doc__ + newFunc.__dict__.update(func.__dict__) + return newFunc + +# For compatibility +def deprecate_import(current, modulename, fromlist, renames = None): + """Import objects from one module into another, wrapping them with a DeprecationWarning""" + import sys + + module = __import__(modulename, fromlist = fromlist) + for position, objname in enumerate(fromlist): + obj = getattr(module, objname) + newobj = deprecated(obj, "{0}.{1}".format(current, objname), + "Please use {0}.{1} instead".format(modulename, objname)) + if renames: + newname = renames[position] + else: + newname = objname + + setattr(sys.modules[current], newname, newobj) + diff --git a/bitbake/lib/bb/build.py b/bitbake/lib/bb/build.py new file mode 100644 index 0000000000..5cb4c06a88 --- /dev/null +++ b/bitbake/lib/bb/build.py @@ -0,0 +1,709 @@ +# ex:ts=4:sw=4:sts=4:et +# 
-*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +# +# BitBake 'Build' implementation +# +# Core code for function execution and task handling in the +# BitBake build tools. +# +# Copyright (C) 2003, 2004 Chris Larson +# +# Based on Gentoo's portage.py. +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +#Based on functions from the base bb module, Copyright 2003 Holger Schurig + +import os +import sys +import logging +import shlex +import glob +import time +import bb +import bb.msg +import bb.process +from contextlib import nested +from bb import event, utils + +bblogger = logging.getLogger('BitBake') +logger = logging.getLogger('BitBake.Build') + +NULL = open(os.devnull, 'r+') + + +# When we execute a python function we'd like certain things +# in all namespaces, hence we add them to __builtins__ +# If we do not do this and use the exec globals, they will +# not be available to subfunctions. 
+__builtins__['bb'] = bb +__builtins__['os'] = os + +class FuncFailed(Exception): + def __init__(self, name = None, logfile = None): + self.logfile = logfile + self.name = name + if name: + self.msg = 'Function failed: %s' % name + else: + self.msg = "Function failed" + + def __str__(self): + if self.logfile and os.path.exists(self.logfile): + msg = ("%s (log file is located at %s)" % + (self.msg, self.logfile)) + else: + msg = self.msg + return msg + +class TaskBase(event.Event): + """Base class for task events""" + + def __init__(self, t, logfile, d): + self._task = t + self._package = d.getVar("PF", True) + self.taskfile = d.getVar("FILE", True) + self.taskname = self._task + self.logfile = logfile + self.time = time.time() + event.Event.__init__(self) + self._message = "recipe %s: task %s: %s" % (d.getVar("PF", True), t, self.getDisplayName()) + + def getTask(self): + return self._task + + def setTask(self, task): + self._task = task + + def getDisplayName(self): + return bb.event.getName(self)[4:] + + task = property(getTask, setTask, None, "task property") + +class TaskStarted(TaskBase): + """Task execution started""" + def __init__(self, t, logfile, taskflags, d): + super(TaskStarted, self).__init__(t, logfile, d) + self.taskflags = taskflags + +class TaskSucceeded(TaskBase): + """Task execution completed""" + +class TaskFailed(TaskBase): + """Task execution failed""" + + def __init__(self, task, logfile, metadata, errprinted = False): + self.errprinted = errprinted + super(TaskFailed, self).__init__(task, logfile, metadata) + +class TaskFailedSilent(TaskBase): + """Task execution failed (silently)""" + def getDisplayName(self): + # Don't need to tell the user it was silent + return "Failed" + +class TaskInvalid(TaskBase): + + def __init__(self, task, metadata): + super(TaskInvalid, self).__init__(task, None, metadata) + self._message = "No such task '%s'" % task + + +class LogTee(object): + def __init__(self, logger, outfile): + self.outfile = outfile + 
self.logger = logger + self.name = self.outfile.name + + def write(self, string): + self.logger.plain(string) + self.outfile.write(string) + + def __enter__(self): + self.outfile.__enter__() + return self + + def __exit__(self, *excinfo): + self.outfile.__exit__(*excinfo) + + def __repr__(self): + return ''.format(self.name) + def flush(self): + self.outfile.flush() + +def exec_func(func, d, dirs = None): + """Execute an BB 'function'""" + + body = d.getVar(func) + if not body: + if body is None: + logger.warn("Function %s doesn't exist", func) + return + + flags = d.getVarFlags(func) + cleandirs = flags.get('cleandirs') + if cleandirs: + for cdir in d.expand(cleandirs).split(): + bb.utils.remove(cdir, True) + bb.utils.mkdirhier(cdir) + + if dirs is None: + dirs = flags.get('dirs') + if dirs: + dirs = d.expand(dirs).split() + + if dirs: + for adir in dirs: + bb.utils.mkdirhier(adir) + adir = dirs[-1] + else: + adir = d.getVar('B', True) + bb.utils.mkdirhier(adir) + + ispython = flags.get('python') + + lockflag = flags.get('lockfiles') + if lockflag: + lockfiles = [f for f in d.expand(lockflag).split()] + else: + lockfiles = None + + tempdir = d.getVar('T', True) + + # or func allows items to be executed outside of the normal + # task set, such as buildhistory + task = d.getVar('BB_RUNTASK', True) or func + if task == func: + taskfunc = task + else: + taskfunc = "%s.%s" % (task, func) + + runfmt = d.getVar('BB_RUNFMT', True) or "run.{func}.{pid}" + runfn = runfmt.format(taskfunc=taskfunc, task=task, func=func, pid=os.getpid()) + runfile = os.path.join(tempdir, runfn) + bb.utils.mkdirhier(os.path.dirname(runfile)) + + # Setup the courtesy link to the runfn, only for tasks + # we create the link 'just' before the run script is created + # if we create it after, and if the run script fails, then the + # link won't be created as an exception would be fired. 
def exec_func(func, d, dirs = None):
    """Execute an BB 'function'"""

    body = d.getVar(func)
    if not body:
        if body is None:
            logger.warn("Function %s doesn't exist", func)
        return

    flags = d.getVarFlags(func)

    # Wipe and recreate any directories named in 'cleandirs' first.
    cleandirs = flags.get('cleandirs')
    if cleandirs:
        for cdir in d.expand(cleandirs).split():
            bb.utils.remove(cdir, True)
            bb.utils.mkdirhier(cdir)

    if dirs is None:
        dirs = flags.get('dirs')
        if dirs:
            dirs = d.expand(dirs).split()

    # The last entry of 'dirs' (or ${B} when no dirs are given) becomes
    # the working directory for the function.
    if dirs:
        for adir in dirs:
            bb.utils.mkdirhier(adir)
        adir = dirs[-1]
    else:
        adir = d.getVar('B', True)
        bb.utils.mkdirhier(adir)

    ispython = flags.get('python')

    lockflag = flags.get('lockfiles')
    lockfiles = d.expand(lockflag).split() if lockflag else None

    tempdir = d.getVar('T', True)

    # or func allows items to be executed outside of the normal
    # task set, such as buildhistory
    task = d.getVar('BB_RUNTASK', True) or func
    taskfunc = task if task == func else "%s.%s" % (task, func)

    runfmt = d.getVar('BB_RUNFMT', True) or "run.{func}.{pid}"
    runfn = runfmt.format(taskfunc=taskfunc, task=task, func=func, pid=os.getpid())
    runfile = os.path.join(tempdir, runfn)
    bb.utils.mkdirhier(os.path.dirname(runfile))

    # Setup the courtesy link to the runfn, only for tasks.  The link is
    # created 'just' before the run script: if it were created after and
    # writing the run script failed, the raised exception would prevent
    # the link from ever being made.
    if task == func:
        runlink = os.path.join(tempdir, 'run.{0}'.format(task))
        if runlink:
            bb.utils.remove(runlink)

        try:
            os.symlink(runfn, runlink)
        except OSError:
            pass

    with bb.utils.fileslocked(lockfiles):
        if ispython:
            exec_func_python(func, d, runfile, cwd=adir)
        else:
            exec_func_shell(func, d, runfile, cwd=adir)

_functionfmt = """
def {function}(d):
{body}

{function}(d)
"""

logformatter = bb.msg.BBLogFormatter("%(levelname)s: %(message)s")

def exec_func_python(func, d, runfile, cwd=None):
    """Execute a python BB 'function'"""

    bbfile = d.getVar('FILE', True)
    code = _functionfmt.format(function=func, body=d.getVar(func, True))
    bb.utils.mkdirhier(os.path.dirname(runfile))
    # Persist the generated code so failures can be reproduced by hand.
    with open(runfile, 'w') as script:
        script.write(code)

    olddir = None
    if cwd:
        try:
            olddir = os.getcwd()
        except OSError:
            olddir = None
        os.chdir(cwd)

    bb.debug(2, "Executing python function %s" % func)

    try:
        comp = utils.better_compile(code, func, bbfile)
        utils.better_exec(comp, {"d": d}, code, bbfile)
    except:
        # Re-raise control-flow exceptions unchanged (exact-type match,
        # as in the original); everything else becomes a FuncFailed.
        if sys.exc_info()[0] in (bb.parse.SkipPackage, bb.build.FuncFailed):
            raise

        raise FuncFailed(func, None)
    finally:
        bb.debug(2, "Python function %s finished" % func)

        if cwd and olddir:
            try:
                os.chdir(olddir)
            except OSError:
                pass

def shell_trap_code():
    """Return the shell preamble that reports nonzero exit statuses."""
    return '''#!/bin/sh\n
# Emit a useful diagnostic if something fails:
bb_exit_handler() {
    ret=$?
    case $ret in
    0)  ;;
    *)  case $BASH_VERSION in
        "")   echo "WARNING: exit code $ret from a shell command.";;
        *)   echo "WARNING: ${BASH_SOURCE[0]}:${BASH_LINENO[0]} exit $ret from
  \"$BASH_COMMAND\"";;
        esac
        exit $ret
    esac
}
trap 'bb_exit_handler' 0
set -e
'''
+ """ + + # Don't let the emitted shell script override PWD + d.delVarFlag('PWD', 'export') + + with open(runfile, 'w') as script: + script.write(shell_trap_code()) + + bb.data.emit_func(func, script, d) + + if bb.msg.loggerVerboseLogs: + script.write("set -x\n") + if cwd: + script.write("cd '%s'\n" % cwd) + script.write("%s\n" % func) + script.write(''' +# cleanup +ret=$? +trap '' 0 +exit $? +''') + + os.chmod(runfile, 0775) + + cmd = runfile + if d.getVarFlag(func, 'fakeroot'): + fakerootcmd = d.getVar('FAKEROOT', True) + if fakerootcmd: + cmd = [fakerootcmd, runfile] + + if bb.msg.loggerDefaultVerbose: + logfile = LogTee(logger, sys.stdout) + else: + logfile = sys.stdout + + bb.debug(2, "Executing shell function %s" % func) + + try: + with open(os.devnull, 'r+') as stdin: + bb.process.run(cmd, shell=False, stdin=stdin, log=logfile) + except bb.process.CmdError: + logfn = d.getVar('BB_LOGFILE', True) + raise FuncFailed(func, logfn) + + bb.debug(2, "Shell function %s finished" % func) + +def _task_data(fn, task, d): + localdata = bb.data.createCopy(d) + localdata.setVar('BB_FILENAME', fn) + localdata.setVar('BB_CURRENTTASK', task[3:]) + localdata.setVar('OVERRIDES', 'task-%s:%s' % + (task[3:].replace('_', '-'), d.getVar('OVERRIDES', False))) + localdata.finalize() + bb.data.expandKeys(localdata) + return localdata + +def _exec_task(fn, task, d, quieterr): + """Execute a BB 'task' + + Execution of a task involves a bit more setup than executing a function, + running it with its own local metadata, and with some useful variables set. 
def _exec_task(fn, task, d, quieterr):
    """Execute a BB 'task'

    Execution of a task involves a bit more setup than executing a function,
    running it with its own local metadata, and with some useful variables set.
    """
    # A name without the 'task' varflag is not a task at all.
    if not d.getVarFlag(task, 'task'):
        event.fire(TaskInvalid(task, d), d)
        logger.error("No such task: %s" % task)
        return 1

    logger.debug(1, "Executing task %s", task)

    localdata = _task_data(fn, task, d)
    tempdir = localdata.getVar('T', True)
    if not tempdir:
        bb.fatal("T variable not set, unable to build")

    # Change nice level if we're asked to
    nice = localdata.getVar("BB_TASK_NICE_LEVEL", True)
    if nice:
        curnice = os.nice(0)
        nice = int(nice) - curnice
        newnice = os.nice(nice)
        logger.debug(1, "Renice to %s " % newnice)

    bb.utils.mkdirhier(tempdir)

    # Determine the logfile to generate
    logfmt = localdata.getVar('BB_LOGFMT', True) or 'log.{task}.{pid}'
    logbase = logfmt.format(task=task, pid=os.getpid())

    # Document the order of the tasks...
    logorder = os.path.join(tempdir, 'log.task_order')
    try:
        with open(logorder, 'a') as logorderfile:
            logorderfile.write('{0} ({1}): {2}\n'.format(task, os.getpid(), logbase))
    except OSError:
        # Best effort only: the ordering log is purely informational.
        logger.exception("Opening log file '%s'", logorder)
        pass

    # Setup the courtesy link to the logfn
    loglink = os.path.join(tempdir, 'log.{0}'.format(task))
    logfn = os.path.join(tempdir, logbase)
    if loglink:
        bb.utils.remove(loglink)

    try:
        os.symlink(logbase, loglink)
    except OSError:
        # Symlinks may be unsupported on this filesystem; not fatal.
        pass

    prefuncs = localdata.getVarFlag(task, 'prefuncs', expand=True)
    postfuncs = localdata.getVarFlag(task, 'postfuncs', expand=True)

    # Records whether any ERROR-level message was logged during the task,
    # so the failure handler knows if the error was already shown.
    class ErrorCheckHandler(logging.Handler):
        def __init__(self):
            self.triggered = False
            logging.Handler.__init__(self, logging.ERROR)
        def emit(self, record):
            self.triggered = True

    # Handle logfiles
    si = open('/dev/null', 'r')
    try:
        bb.utils.mkdirhier(os.path.dirname(logfn))
        logfile = open(logfn, 'w')
    except OSError:
        # NOTE(review): if this open fails, 'logfile' is left unbound and
        # the dup2/close calls below raise NameError — confirm whether a
        # hard failure is intended here.
        logger.exception("Opening log file '%s'", logfn)
        pass

    # Dup the existing fds so we dont lose them
    osi = [os.dup(sys.stdin.fileno()), sys.stdin.fileno()]
    oso = [os.dup(sys.stdout.fileno()), sys.stdout.fileno()]
    ose = [os.dup(sys.stderr.fileno()), sys.stderr.fileno()]

    # Replace those fds with our own
    os.dup2(si.fileno(), osi[1])
    os.dup2(logfile.fileno(), oso[1])
    os.dup2(logfile.fileno(), ose[1])

    # Ensure python logging goes to the logfile
    handler = logging.StreamHandler(logfile)
    handler.setFormatter(logformatter)
    # Always enable full debug output into task logfiles
    handler.setLevel(logging.DEBUG - 2)
    bblogger.addHandler(handler)

    errchk = ErrorCheckHandler()
    bblogger.addHandler(errchk)

    localdata.setVar('BB_LOGFILE', logfn)
    localdata.setVar('BB_RUNTASK', task)

    flags = localdata.getVarFlags(task)

    event.fire(TaskStarted(task, logfn, flags, localdata), localdata)
    try:
        # prefuncs, the task itself, then postfuncs, all against the
        # task-local datastore.
        for func in (prefuncs or '').split():
            exec_func(func, localdata)
        exec_func(task, localdata)
        for func in (postfuncs or '').split():
            exec_func(func, localdata)
    except FuncFailed as exc:
        if quieterr:
            event.fire(TaskFailedSilent(task, logfn, localdata), localdata)
        else:
            errprinted = errchk.triggered
            logger.error(str(exc))
            event.fire(TaskFailed(task, logfn, localdata, errprinted), localdata)
        return 1
    finally:
        sys.stdout.flush()
        sys.stderr.flush()

        bblogger.removeHandler(handler)

        # Restore the backup fds
        os.dup2(osi[0], osi[1])
        os.dup2(oso[0], oso[1])
        os.dup2(ose[0], ose[1])

        # Close the backup fds
        os.close(osi[0])
        os.close(oso[0])
        os.close(ose[0])
        si.close()

        logfile.close()
        # An empty log means nothing was written; drop it and its link.
        if os.path.exists(logfn) and os.path.getsize(logfn) == 0:
            logger.debug(2, "Zero size logfn %s, removing", logfn)
            bb.utils.remove(logfn)
            bb.utils.remove(loglink)
    event.fire(TaskSucceeded(task, logfn, localdata), localdata)

    if not localdata.getVarFlag(task, 'nostamp') and not localdata.getVarFlag(task, 'selfstamp'):
        make_stamp(task, localdata)

    return 0
"profile-%s.log" % (d.getVar("PN", True) + "-" + task) + try: + import cProfile as profile + except: + import profile + prof = profile.Profile() + ret = profile.Profile.runcall(prof, _exec_task, fn, task, d, quieterr) + prof.dump_stats(profname) + bb.utils.process_profilelog(profname) + + return ret + else: + return _exec_task(fn, task, d, quieterr) + + except Exception: + from traceback import format_exc + if not quieterr: + logger.error("Build of %s failed" % (task)) + logger.error(format_exc()) + failedevent = TaskFailed(task, None, d, True) + event.fire(failedevent, d) + return 1 + +def stamp_internal(taskname, d, file_name): + """ + Internal stamp helper function + Makes sure the stamp directory exists + Returns the stamp path+filename + + In the bitbake core, d can be a CacheData and file_name will be set. + When called in task context, d will be a data store, file_name will not be set + """ + taskflagname = taskname + if taskname.endswith("_setscene") and taskname != "do_setscene": + taskflagname = taskname.replace("_setscene", "") + + if file_name: + stamp = d.stamp_base[file_name].get(taskflagname) or d.stamp[file_name] + extrainfo = d.stamp_extrainfo[file_name].get(taskflagname) or "" + else: + stamp = d.getVarFlag(taskflagname, 'stamp-base', True) or d.getVar('STAMP', True) + file_name = d.getVar('BB_FILENAME', True) + extrainfo = d.getVarFlag(taskflagname, 'stamp-extra-info', True) or "" + + if not stamp: + return + + stamp = bb.parse.siggen.stampfile(stamp, file_name, taskname, extrainfo) + + stampdir = os.path.dirname(stamp) + if bb.parse.cached_mtime_noerror(stampdir) == 0: + bb.utils.mkdirhier(stampdir) + + return stamp + +def stamp_cleanmask_internal(taskname, d, file_name): + """ + Internal stamp helper function to generate stamp cleaning mask + Returns the stamp path+filename + + In the bitbake core, d can be a CacheData and file_name will be set. 
def stamp_cleanmask_internal(taskname, d, file_name):
    """
    Internal stamp helper function to generate stamp cleaning mask
    Returns the stamp path+filename

    In the bitbake core, d can be a CacheData and file_name will be set.
    When called in task context, d will be a data store, file_name will not be set
    """
    # The *_setscene variants share their stamp flags with the base task.
    basetask = taskname
    if taskname.endswith("_setscene") and taskname != "do_setscene":
        basetask = taskname.replace("_setscene", "")

    if file_name:
        stamp = d.stamp_base_clean[file_name].get(basetask) or d.stampclean[file_name]
        extrainfo = d.stamp_extrainfo[file_name].get(basetask) or ""
    else:
        stamp = d.getVarFlag(basetask, 'stamp-base-clean', True) or d.getVar('STAMPCLEAN', True)
        file_name = d.getVar('BB_FILENAME', True)
        extrainfo = d.getVarFlag(basetask, 'stamp-extra-info', True) or ""

    if not stamp:
        return []

    cleanmask = bb.parse.siggen.stampcleanmask(stamp, file_name, taskname, extrainfo)

    # Clean both the base task's stamps and the _setscene counterpart's.
    return [cleanmask, cleanmask.replace(basetask, basetask + "_setscene")]

def make_stamp(task, d, file_name = None):
    """
    Creates/updates a stamp for a given task
    (d can be a data dict or dataCache)
    """
    for mask in stamp_cleanmask_internal(task, d, file_name):
        for name in glob.glob(mask):
            # Preserve sigdata files in the stamps directory
            if "sigdata" in name:
                continue
            # Preserve taint files in the stamps directory
            if name.endswith('.taint'):
                continue
            os.unlink(name)

    stamp = stamp_internal(task, d, file_name)
    # Remove the file and recreate to force timestamp
    # change on broken NFS filesystems
    if stamp:
        bb.utils.remove(stamp)
        open(stamp, "w").close()

    # If we're in task context, write out a signature file for each task
    # as it completes
    if not task.endswith("_setscene") and task != "do_setscene" and not file_name:
        file_name = d.getVar('BB_FILENAME', True)
        bb.parse.siggen.dump_sigtask(file_name, task, d.getVar('STAMP', True), True)

def del_stamp(task, d, file_name = None):
    """
    Removes a stamp for a given task
    (d can be a data dict or dataCache)
    """
    bb.utils.remove(stamp_internal(task, d, file_name))
def write_taint(task, d, file_name = None):
    """
    Creates a "taint" file which will force the specified task and its
    dependents to be re-run the next time by influencing the value of its
    taskhash.
    (d can be a data dict or dataCache)
    """
    import uuid
    if file_name:
        taintfn = d.stamp[file_name] + '.' + task + '.taint'
    else:
        taintfn = d.getVar('STAMP', True) + '.' + task + '.taint'
    bb.utils.mkdirhier(os.path.dirname(taintfn))
    # The specific content of the taint file is not really important,
    # we just need it to be random, so a random UUID is used
    with open(taintfn, 'w') as taintf:
        taintf.write(str(uuid.uuid4()))

def stampfile(taskname, d, file_name = None):
    """
    Return the stamp for a given task
    (d can be a data dict or dataCache)
    """
    return stamp_internal(taskname, d, file_name)

def add_tasks(tasklist, deltasklist, d):
    """Record each task in tasklist (except those being deleted) in the
    shared _task_deps structure held in the datastore."""
    task_deps = d.getVar('_task_deps') or {}
    task_deps.setdefault('tasks', [])
    task_deps.setdefault('parents', {})

    for task in tasklist:
        task = d.expand(task)

        if task in deltasklist:
            continue

        d.setVarFlag(task, 'task', 1)

        if task not in task_deps['tasks']:
            task_deps['tasks'].append(task)

        flags = d.getVarFlags(task)
        # Copy the per-task dependency/behaviour flags into task_deps,
        # creating each sub-dict even when no task carries the flag.
        for name in ('depends', 'rdepends', 'deptask', 'rdeptask',
                     'recrdeptask', 'recideptask', 'nostamp',
                     'fakeroot', 'noexec', 'umask'):
            task_deps.setdefault(name, {})
            if name in flags:
                task_deps[name][task] = d.expand(flags[name])

        task_deps['parents'][task] = []
        if 'deps' in flags:
            for dep in flags['deps']:
                task_deps['parents'][task].append(d.expand(dep))

    # don't assume holding a reference
    d.setVar('_task_deps', task_deps)
def addtask(task, before, after, d):
    """Declare 'task', wiring dependencies: it runs after every task named
    in 'after' and before every task named in 'before'."""
    if not task.startswith("do_"):
        task = "do_" + task

    d.setVarFlag(task, "task", 1)

    bbtasks = d.getVar('__BBTASKS') or []
    if task not in bbtasks:
        bbtasks.append(task)
    d.setVar('__BBTASKS', bbtasks)

    # Tasks listed in 'after' become dependencies of this task...
    existing = d.getVarFlag(task, "deps") or []
    if after is not None:
        for dep in after.split():
            if dep not in existing:
                existing.append(dep)
    d.setVarFlag(task, "deps", existing)

    # ...while tasks listed in 'before' gain a dependency on it.
    if before is not None:
        for dep in before.split():
            existing = d.getVarFlag(dep, "deps") or []
            if task not in existing:
                d.setVarFlag(dep, "deps", [task] + existing)

def deltask(task, d):
    """Mark 'task' for deletion from the task graph (see add_tasks)."""
    if not task.startswith("do_"):
        task = "do_" + task

    bbtasks = d.getVar('__BBDELTASKS') or []
    if task not in bbtasks:
        bbtasks.append(task)
    d.setVar('__BBDELTASKS', bbtasks)
__cache_version__ = "147"

def getCacheFile(path, filename, data_hash):
    """Return the on-disk cache file path, keyed by the config data hash."""
    return os.path.join(path, '%s.%s' % (filename, data_hash))

# RecipeInfoCommon defines common data retrieving methods
# from meta data for caches. CoreRecipeInfo as well as other
# Extra RecipeInfo needs to inherit this class
class RecipeInfoCommon(object):
    """Shared metadata accessors for the recipe-info cache classes."""

    @classmethod
    def listvar(cls, var, metadata):
        # Whitespace-split value; [] when the variable is unset.
        return cls.getvar(var, metadata).split()

    @classmethod
    def intvar(cls, var, metadata):
        # 0 when the variable is unset or empty.
        return int(cls.getvar(var, metadata) or 0)

    @classmethod
    def depvar(cls, var, metadata):
        return bb.utils.explode_deps(cls.getvar(var, metadata))

    @classmethod
    def pkgvar(cls, var, packages, metadata):
        # Per-package dependency lists, e.g. RDEPENDS_<pkg>.
        return dict((pkg, cls.depvar("%s_%s" % (var, pkg), metadata))
                    for pkg in packages)

    @classmethod
    def taskvar(cls, var, tasks, metadata):
        # Per-task values, e.g. BB_BASEHASH_task-<task>.
        return dict((task, cls.getvar("%s_task-%s" % (var, task), metadata))
                    for task in tasks)

    @classmethod
    def flaglist(cls, flag, varlist, metadata, squash=False):
        out_dict = dict((var, metadata.getVarFlag(var, flag, True))
                        for var in varlist)
        if squash:
            # Drop entries whose flag value is unset or empty.
            return dict((k, v) for (k, v) in out_dict.iteritems() if v)
        return out_dict

    @classmethod
    def getvar(cls, var, metadata):
        return metadata.getVar(var, True) or ''
class CoreRecipeInfo(RecipeInfoCommon):
    __slots__ = ()

    # File name used for this cache class's pickle on disk.
    cachefile = "bb_cache.dat"

    def __init__(self, filename, metadata):
        # Snapshot everything the cooker needs from a parsed recipe so the
        # datastore itself never has to be kept (or re-parsed).
        self.file_depends = metadata.getVar('__depends', False)
        self.timestamp = bb.parse.cached_mtime(filename)
        self.variants = self.listvar('__VARIANTS', metadata) + ['']
        self.appends = self.listvar('__BBAPPEND', metadata)
        self.nocache = self.getvar('__BB_DONT_CACHE', metadata)

        self.skipreason = self.getvar('__SKIPPED', metadata)
        if self.skipreason:
            # Skipped recipes keep only enough data to report providers.
            self.pn = self.getvar('PN', metadata) or bb.parse.BBHandler.vars_from_file(filename,metadata)[0]
            self.skipped = True
            self.provides  = self.depvar('PROVIDES', metadata)
            self.rprovides = self.depvar('RPROVIDES', metadata)
            return

        self.tasks = metadata.getVar('__BBTASKS', False)

        self.pn = self.getvar('PN', metadata)
        self.packages = self.listvar('PACKAGES', metadata)
        # PN always provides a package of its own name.
        if not self.pn in self.packages:
            self.packages.append(self.pn)

        self.basetaskhashes = self.taskvar('BB_BASEHASH', self.tasks, metadata)
        self.hashfilename = self.getvar('BB_HASHFILENAME', metadata)

        self.task_deps = metadata.getVar('_task_deps', False) or {'tasks': [], 'parents': {}}

        self.skipped = False
        self.pe = self.getvar('PE', metadata)
        self.pv = self.getvar('PV', metadata)
        self.pr = self.getvar('PR', metadata)
        self.defaultpref = self.intvar('DEFAULT_PREFERENCE', metadata)
        self.not_world = self.getvar('EXCLUDE_FROM_WORLD', metadata)
        self.stamp = self.getvar('STAMP', metadata)
        self.stampclean = self.getvar('STAMPCLEAN', metadata)
        self.stamp_base = self.flaglist('stamp-base', self.tasks, metadata)
        self.stamp_base_clean = self.flaglist('stamp-base-clean', self.tasks, metadata)
        self.stamp_extrainfo = self.flaglist('stamp-extra-info', self.tasks, metadata)
        self.file_checksums = self.flaglist('file-checksums', self.tasks, metadata, True)
        self.packages_dynamic = self.listvar('PACKAGES_DYNAMIC', metadata)
        self.depends          = self.depvar('DEPENDS', metadata)
        self.provides         = self.depvar('PROVIDES', metadata)
        self.rdepends         = self.depvar('RDEPENDS', metadata)
        self.rprovides        = self.depvar('RPROVIDES', metadata)
        self.rrecommends      = self.depvar('RRECOMMENDS', metadata)
        self.rprovides_pkg    = self.pkgvar('RPROVIDES', self.packages, metadata)
        self.rdepends_pkg     = self.pkgvar('RDEPENDS', self.packages, metadata)
        self.rrecommends_pkg  = self.pkgvar('RRECOMMENDS', self.packages, metadata)
        self.inherits         = self.getvar('__inherit_cache', metadata)
        self.fakerootenv      = self.getvar('FAKEROOTENV', metadata)
        self.fakerootdirs     = self.getvar('FAKEROOTDIRS', metadata)
        self.fakerootnoenv    = self.getvar('FAKEROOTNOENV', metadata)

    @classmethod
    def init_cacheData(cls, cachedata):
        # CacheData in Core RecipeInfo Class
        # Prepare the empty aggregate structures add_cacheData fills in.
        cachedata.task_deps = {}
        cachedata.pkg_fn = {}
        cachedata.pkg_pn = defaultdict(list)
        cachedata.pkg_pepvpr = {}
        cachedata.pkg_dp = {}

        cachedata.stamp = {}
        cachedata.stampclean = {}
        cachedata.stamp_base = {}
        cachedata.stamp_base_clean = {}
        cachedata.stamp_extrainfo = {}
        cachedata.file_checksums = {}
        cachedata.fn_provides = {}
        cachedata.pn_provides = defaultdict(list)
        cachedata.all_depends = []

        cachedata.deps = defaultdict(list)
        cachedata.packages = defaultdict(list)
        cachedata.providers = defaultdict(list)
        cachedata.rproviders = defaultdict(list)
        cachedata.packages_dynamic = defaultdict(list)

        cachedata.rundeps = defaultdict(lambda: defaultdict(list))
        cachedata.runrecs = defaultdict(lambda: defaultdict(list))
        cachedata.possible_world = []
        cachedata.universe_target = []
        cachedata.hashfn = {}

        cachedata.basetaskhash = {}
        cachedata.inherits = {}
        cachedata.fakerootenv = {}
        cachedata.fakerootnoenv = {}
        cachedata.fakerootdirs = {}

    def add_cacheData(self, cachedata, fn):
        # Merge this recipe's info into the aggregate CacheData, keyed by
        # the recipe file name 'fn'.
        cachedata.task_deps[fn] = self.task_deps
        cachedata.pkg_fn[fn] = self.pn
        cachedata.pkg_pn[self.pn].append(fn)
        cachedata.pkg_pepvpr[fn] = (self.pe, self.pv, self.pr)
        cachedata.pkg_dp[fn] = self.defaultpref
        cachedata.stamp[fn] = self.stamp
        cachedata.stampclean[fn] = self.stampclean
        cachedata.stamp_base[fn] = self.stamp_base
        cachedata.stamp_base_clean[fn] = self.stamp_base_clean
        cachedata.stamp_extrainfo[fn] = self.stamp_extrainfo
        cachedata.file_checksums[fn] = self.file_checksums

        # PN is implicitly provided, ahead of explicit PROVIDES entries.
        provides = [self.pn]
        for provide in self.provides:
            if provide not in provides:
                provides.append(provide)
        cachedata.fn_provides[fn] = provides

        for provide in provides:
            cachedata.providers[provide].append(fn)
            if provide not in cachedata.pn_provides[self.pn]:
                cachedata.pn_provides[self.pn].append(provide)

        for dep in self.depends:
            if dep not in cachedata.deps[fn]:
                cachedata.deps[fn].append(dep)
            if dep not in cachedata.all_depends:
                cachedata.all_depends.append(dep)

        # NOTE(review): 'rprovides' aliases self.rprovides, so the '+='
        # below extends the cached list in place on every call — confirm
        # whether a copy (list(self.rprovides)) was intended.
        rprovides = self.rprovides
        for package in self.packages:
            cachedata.packages[package].append(fn)
            rprovides += self.rprovides_pkg[package]

        for rprovide in rprovides:
            cachedata.rproviders[rprovide].append(fn)

        for package in self.packages_dynamic:
            cachedata.packages_dynamic[package].append(fn)

        # Build hash of runtime depends and rececommends
        for package in self.packages + [self.pn]:
            cachedata.rundeps[fn][package] = list(self.rdepends) + self.rdepends_pkg[package]
            cachedata.runrecs[fn][package] = list(self.rrecommends) + self.rrecommends_pkg[package]

        # Collect files we may need for possible world-dep
        # calculations
        if not self.not_world:
            cachedata.possible_world.append(fn)

        # create a collection of all targets for sanity checking
        # tasks, such as upstream versions, license, and tools for
        # task and image creation.
        cachedata.universe_target.append(self.pn)

        cachedata.hashfn[fn] = self.hashfilename
        for task, taskhash in self.basetaskhashes.iteritems():
            identifier = '%s.%s' % (fn, task)
            cachedata.basetaskhash[identifier] = taskhash

        cachedata.inherits[fn] = self.inherits
        cachedata.fakerootenv[fn] = self.fakerootenv
        cachedata.fakerootnoenv[fn] = self.fakerootnoenv
        cachedata.fakerootdirs[fn] = self.fakerootdirs
" + "Set CACHE = to enable.") + return + + self.has_cache = True + self.cachefile = getCacheFile(self.cachedir, "bb_cache.dat", self.data_hash) + + logger.debug(1, "Using cache in '%s'", self.cachedir) + bb.utils.mkdirhier(self.cachedir) + + cache_ok = True + if self.caches_array: + for cache_class in self.caches_array: + if type(cache_class) is type and issubclass(cache_class, RecipeInfoCommon): + cachefile = getCacheFile(self.cachedir, cache_class.cachefile, self.data_hash) + cache_ok = cache_ok and os.path.exists(cachefile) + cache_class.init_cacheData(self) + if cache_ok: + self.load_cachefile() + elif os.path.isfile(self.cachefile): + logger.info("Out of date cache found, rebuilding...") + + def load_cachefile(self): + # Firstly, using core cache file information for + # valid checking + with open(self.cachefile, "rb") as cachefile: + pickled = pickle.Unpickler(cachefile) + try: + cache_ver = pickled.load() + bitbake_ver = pickled.load() + except Exception: + logger.info('Invalid cache, rebuilding...') + return + + if cache_ver != __cache_version__: + logger.info('Cache version mismatch, rebuilding...') + return + elif bitbake_ver != bb.__version__: + logger.info('Bitbake version mismatch, rebuilding...') + return + + + cachesize = 0 + previous_progress = 0 + previous_percent = 0 + + # Calculate the correct cachesize of all those cache files + for cache_class in self.caches_array: + if type(cache_class) is type and issubclass(cache_class, RecipeInfoCommon): + cachefile = getCacheFile(self.cachedir, cache_class.cachefile, self.data_hash) + with open(cachefile, "rb") as cachefile: + cachesize += os.fstat(cachefile.fileno()).st_size + + bb.event.fire(bb.event.CacheLoadStarted(cachesize), self.data) + + for cache_class in self.caches_array: + if type(cache_class) is type and issubclass(cache_class, RecipeInfoCommon): + cachefile = getCacheFile(self.cachedir, cache_class.cachefile, self.data_hash) + with open(cachefile, "rb") as cachefile: + pickled = 
pickle.Unpickler(cachefile) + while cachefile: + try: + key = pickled.load() + value = pickled.load() + except Exception: + break + if self.depends_cache.has_key(key): + self.depends_cache[key].append(value) + else: + self.depends_cache[key] = [value] + # only fire events on even percentage boundaries + current_progress = cachefile.tell() + previous_progress + current_percent = 100 * current_progress / cachesize + if current_percent > previous_percent: + previous_percent = current_percent + bb.event.fire(bb.event.CacheLoadProgress(current_progress, cachesize), + self.data) + + previous_progress += current_progress + + # Note: depends cache number is corresponding to the parsing file numbers. + # The same file has several caches, still regarded as one item in the cache + bb.event.fire(bb.event.CacheLoadCompleted(cachesize, + len(self.depends_cache)), + self.data) + + + @staticmethod + def virtualfn2realfn(virtualfn): + """ + Convert a virtual file name to a real one + the associated subclass keyword + """ + + fn = virtualfn + cls = "" + if virtualfn.startswith('virtual:'): + elems = virtualfn.split(':') + cls = ":".join(elems[1:-1]) + fn = elems[-1] + return (fn, cls) + + @staticmethod + def realfn2virtual(realfn, cls): + """ + Convert a real filename + the associated subclass keyword to a virtual filename + """ + if cls == "": + return realfn + return "virtual:" + cls + ":" + realfn + + @classmethod + def loadDataFull(cls, virtualfn, appends, cfgData): + """ + Return a complete set of data for fn. + To do this, we need to parse the file. 
+ """ + + (fn, virtual) = cls.virtualfn2realfn(virtualfn) + + logger.debug(1, "Parsing %s (full)", fn) + + cfgData.setVar("__ONLYFINALISE", virtual or "default") + bb_data = cls.load_bbfile(fn, appends, cfgData) + return bb_data[virtual] + + @classmethod + def parse(cls, filename, appends, configdata, caches_array): + """Parse the specified filename, returning the recipe information""" + infos = [] + datastores = cls.load_bbfile(filename, appends, configdata) + depends = [] + for variant, data in sorted(datastores.iteritems(), + key=lambda i: i[0], + reverse=True): + virtualfn = cls.realfn2virtual(filename, variant) + depends = depends + (data.getVar("__depends", False) or []) + if depends and not variant: + data.setVar("__depends", depends) + + info_array = [] + for cache_class in caches_array: + if type(cache_class) is type and issubclass(cache_class, RecipeInfoCommon): + info = cache_class(filename, data) + info_array.append(info) + infos.append((virtualfn, info_array)) + + return infos + + def load(self, filename, appends, configdata): + """Obtain the recipe information for the specified filename, + using cached values if available, otherwise parsing. + + Note that if it does parse to obtain the info, it will not + automatically add the information to the cache or to your + CacheData. 
Use the add or add_info method to do so after + running this, or use loadData instead.""" + cached = self.cacheValid(filename, appends) + if cached: + infos = [] + # info_array item is a list of [CoreRecipeInfo, XXXRecipeInfo] + info_array = self.depends_cache[filename] + for variant in info_array[0].variants: + virtualfn = self.realfn2virtual(filename, variant) + infos.append((virtualfn, self.depends_cache[virtualfn])) + else: + logger.debug(1, "Parsing %s", filename) + return self.parse(filename, appends, configdata, self.caches_array) + + return cached, infos + + def loadData(self, fn, appends, cfgData, cacheData): + """Load the recipe info for the specified filename, + parsing and adding to the cache if necessary, and adding + the recipe information to the supplied CacheData instance.""" + skipped, virtuals = 0, 0 + + cached, infos = self.load(fn, appends, cfgData) + for virtualfn, info_array in infos: + if info_array[0].skipped: + logger.debug(1, "Skipping %s: %s", virtualfn, info_array[0].skipreason) + skipped += 1 + else: + self.add_info(virtualfn, info_array, cacheData, not cached) + virtuals += 1 + + return cached, skipped, virtuals + + def cacheValid(self, fn, appends): + """ + Is the cache valid for fn? + Fast version, no timestamps checked. + """ + if fn not in self.checked: + self.cacheValidUpdate(fn, appends) + + # Is cache enabled? + if not self.has_cache: + return False + if fn in self.clean: + return True + return False + + def cacheValidUpdate(self, fn, appends): + """ + Is the cache valid for fn? + Make thorough (slower) checks including timestamps. + """ + # Is cache enabled? 
+ if not self.has_cache: + return False + + self.checked.add(fn) + + # File isn't in depends_cache + if not fn in self.depends_cache: + logger.debug(2, "Cache: %s is not cached", fn) + return False + + mtime = bb.parse.cached_mtime_noerror(fn) + + # Check file still exists + if mtime == 0: + logger.debug(2, "Cache: %s no longer exists", fn) + self.remove(fn) + return False + + info_array = self.depends_cache[fn] + # Check the file's timestamp + if mtime != info_array[0].timestamp: + logger.debug(2, "Cache: %s changed", fn) + self.remove(fn) + return False + + # Check dependencies are still valid + depends = info_array[0].file_depends + if depends: + for f, old_mtime in depends: + fmtime = bb.parse.cached_mtime_noerror(f) + # Check if file still exists + if old_mtime != 0 and fmtime == 0: + logger.debug(2, "Cache: %s's dependency %s was removed", + fn, f) + self.remove(fn) + return False + + if (fmtime != old_mtime): + logger.debug(2, "Cache: %s's dependency %s changed", + fn, f) + self.remove(fn) + return False + + if hasattr(info_array[0], 'file_checksums'): + for _, fl in info_array[0].file_checksums.items(): + for f in fl.split(): + if not ('*' in f or os.path.exists(f)): + logger.debug(2, "Cache: %s's file checksum list file %s was removed", + fn, f) + self.remove(fn) + return False + + if appends != info_array[0].appends: + logger.debug(2, "Cache: appends for %s changed", fn) + logger.debug(2, "%s to %s" % (str(appends), str(info_array[0].appends))) + self.remove(fn) + return False + + invalid = False + for cls in info_array[0].variants: + virtualfn = self.realfn2virtual(fn, cls) + self.clean.add(virtualfn) + if virtualfn not in self.depends_cache: + logger.debug(2, "Cache: %s is not cached", virtualfn) + invalid = True + + # If any one of the variants is not present, mark as invalid for all + if invalid: + for cls in info_array[0].variants: + virtualfn = self.realfn2virtual(fn, cls) + if virtualfn in self.clean: + logger.debug(2, "Cache: Removing %s from 
cache", virtualfn) + self.clean.remove(virtualfn) + if fn in self.clean: + logger.debug(2, "Cache: Marking %s as not clean", fn) + self.clean.remove(fn) + return False + + self.clean.add(fn) + return True + + def remove(self, fn): + """ + Remove a fn from the cache + Called from the parser in error cases + """ + if fn in self.depends_cache: + logger.debug(1, "Removing %s from cache", fn) + del self.depends_cache[fn] + if fn in self.clean: + logger.debug(1, "Marking %s as unclean", fn) + self.clean.remove(fn) + + def sync(self): + """ + Save the cache + Called from the parser when complete (or exiting) + """ + + if not self.has_cache: + return + + if self.cacheclean: + logger.debug(2, "Cache is clean, not saving.") + return + + file_dict = {} + pickler_dict = {} + for cache_class in self.caches_array: + if type(cache_class) is type and issubclass(cache_class, RecipeInfoCommon): + cache_class_name = cache_class.__name__ + cachefile = getCacheFile(self.cachedir, cache_class.cachefile, self.data_hash) + file_dict[cache_class_name] = open(cachefile, "wb") + pickler_dict[cache_class_name] = pickle.Pickler(file_dict[cache_class_name], pickle.HIGHEST_PROTOCOL) + + pickler_dict['CoreRecipeInfo'].dump(__cache_version__) + pickler_dict['CoreRecipeInfo'].dump(bb.__version__) + + try: + for key, info_array in self.depends_cache.iteritems(): + for info in info_array: + if isinstance(info, RecipeInfoCommon): + cache_class_name = info.__class__.__name__ + pickler_dict[cache_class_name].dump(key) + pickler_dict[cache_class_name].dump(info) + finally: + for cache_class in self.caches_array: + if type(cache_class) is type and issubclass(cache_class, RecipeInfoCommon): + cache_class_name = cache_class.__name__ + file_dict[cache_class_name].close() + + del self.depends_cache + + @staticmethod + def mtime(cachefile): + return bb.parse.cached_mtime_noerror(cachefile) + + def add_info(self, filename, info_array, cacheData, parsed=None): + if isinstance(info_array[0], CoreRecipeInfo) and 
(not info_array[0].skipped): + cacheData.add_from_recipeinfo(filename, info_array) + + if not self.has_cache: + return + + if (info_array[0].skipped or 'SRCREVINACTION' not in info_array[0].pv) and not info_array[0].nocache: + if parsed: + self.cacheclean = False + self.depends_cache[filename] = info_array + + def add(self, file_name, data, cacheData, parsed=None): + """ + Save data we need into the cache + """ + + realfn = self.virtualfn2realfn(file_name)[0] + + info_array = [] + for cache_class in self.caches_array: + if type(cache_class) is type and issubclass(cache_class, RecipeInfoCommon): + info_array.append(cache_class(realfn, data)) + self.add_info(file_name, info_array, cacheData, parsed) + + @staticmethod + def load_bbfile(bbfile, appends, config): + """ + Load and parse one .bb build file + Return the data and whether parsing resulted in the file being skipped + """ + chdir_back = False + + from bb import data, parse + + # expand tmpdir to include this topdir + data.setVar('TMPDIR', data.getVar('TMPDIR', config, 1) or "", config) + bbfile_loc = os.path.abspath(os.path.dirname(bbfile)) + oldpath = os.path.abspath(os.getcwd()) + parse.cached_mtime_noerror(bbfile_loc) + bb_data = data.init_db(config) + # The ConfHandler first looks if there is a TOPDIR and if not + # then it would call getcwd(). + # Previously, we chdir()ed to bbfile_loc, called the handler + # and finally chdir()ed back, a couple of thousand times. We now + # just fill in TOPDIR to point to bbfile_loc if there is no TOPDIR yet. 
+ if not data.getVar('TOPDIR', bb_data): + chdir_back = True + data.setVar('TOPDIR', bbfile_loc, bb_data) + try: + if appends: + data.setVar('__BBAPPEND', " ".join(appends), bb_data) + bb_data = parse.handle(bbfile, bb_data) + if chdir_back: + os.chdir(oldpath) + return bb_data + except: + if chdir_back: + os.chdir(oldpath) + raise + + +def init(cooker): + """ + The Objective: Cache the minimum amount of data possible yet get to the + stage of building packages (i.e. tryBuild) without reparsing any .bb files. + + To do this, we intercept getVar calls and only cache the variables we see + being accessed. We rely on the cache getVar calls being made for all + variables bitbake might need to use to reach this stage. For each cached + file we need to track: + + * Its mtime + * The mtimes of all its dependencies + * Whether it caused a parse.SkipPackage exception + + Files causing parsing errors are evicted from the cache. + + """ + return Cache(cooker.configuration.data, cooker.configuration.data_hash) + + +class CacheData(object): + """ + The data structures we compile from the cached data + """ + + def __init__(self, caches_array): + self.caches_array = caches_array + for cache_class in self.caches_array: + if type(cache_class) is type and issubclass(cache_class, RecipeInfoCommon): + cache_class.init_cacheData(self) + + # Direct cache variables + self.task_queues = {} + self.preferred = {} + self.tasks = {} + # Indirect Cache variables (set elsewhere) + self.ignored_dependencies = [] + self.world_target = set() + self.bbfile_priority = {} + + def add_from_recipeinfo(self, fn, info_array): + for info in info_array: + info.add_cacheData(self, fn) + +class MultiProcessCache(object): + """ + BitBake multi-process cache implementation + + Used by the codeparser & file checksum caches + """ + + def __init__(self): + self.cachefile = None + self.cachedata = self.create_cachedata() + self.cachedata_extras = self.create_cachedata() + + def init_cache(self, d): + cachedir = 
(d.getVar("PERSISTENT_DIR", True) or + d.getVar("CACHE", True)) + if cachedir in [None, '']: + return + bb.utils.mkdirhier(cachedir) + self.cachefile = os.path.join(cachedir, self.__class__.cache_file_name) + logger.debug(1, "Using cache in '%s'", self.cachefile) + + glf = bb.utils.lockfile(self.cachefile + ".lock") + + try: + with open(self.cachefile, "rb") as f: + p = pickle.Unpickler(f) + data, version = p.load() + except: + bb.utils.unlockfile(glf) + return + + bb.utils.unlockfile(glf) + + if version != self.__class__.CACHE_VERSION: + return + + self.cachedata = data + + def internSet(self, items): + new = set() + for i in items: + new.add(intern(i)) + return new + + def compress_keys(self, data): + # Override in subclasses if desired + return + + def create_cachedata(self): + data = [{}] + return data + + def save_extras(self, d): + if not self.cachefile: + return + + glf = bb.utils.lockfile(self.cachefile + ".lock", shared=True) + + i = os.getpid() + lf = None + while not lf: + lf = bb.utils.lockfile(self.cachefile + ".lock." 
+ str(i), retry=False) + if not lf or os.path.exists(self.cachefile + "-" + str(i)): + if lf: + bb.utils.unlockfile(lf) + lf = None + i = i + 1 + continue + + with open(self.cachefile + "-" + str(i), "wb") as f: + p = pickle.Pickler(f, -1) + p.dump([self.cachedata_extras, self.__class__.CACHE_VERSION]) + + bb.utils.unlockfile(lf) + bb.utils.unlockfile(glf) + + def merge_data(self, source, dest): + for j in range(0,len(dest)): + for h in source[j]: + if h not in dest[j]: + dest[j][h] = source[j][h] + + def save_merge(self, d): + if not self.cachefile: + return + + glf = bb.utils.lockfile(self.cachefile + ".lock") + + try: + with open(self.cachefile, "rb") as f: + p = pickle.Unpickler(f) + data, version = p.load() + except (IOError, EOFError): + data, version = None, None + + if version != self.__class__.CACHE_VERSION: + data = self.create_cachedata() + + for f in [y for y in os.listdir(os.path.dirname(self.cachefile)) if y.startswith(os.path.basename(self.cachefile) + '-')]: + f = os.path.join(os.path.dirname(self.cachefile), f) + try: + with open(f, "rb") as fd: + p = pickle.Unpickler(fd) + extradata, version = p.load() + except (IOError, EOFError): + extradata, version = self.create_cachedata(), None + + if version != self.__class__.CACHE_VERSION: + continue + + self.merge_data(extradata, data) + os.unlink(f) + + self.compress_keys(data) + + with open(self.cachefile, "wb") as f: + p = pickle.Pickler(f, -1) + p.dump([data, self.__class__.CACHE_VERSION]) + + bb.utils.unlockfile(glf) + diff --git a/bitbake/lib/bb/cache_extra.py b/bitbake/lib/bb/cache_extra.py new file mode 100644 index 0000000000..83f4959d6c --- /dev/null +++ b/bitbake/lib/bb/cache_extra.py @@ -0,0 +1,75 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +# +# Extra RecipeInfo will be all defined in this file. Currently, +# Only Hob (Image Creator) Requests some extra fields. So +# HobRecipeInfo is defined. 
It's named HobRecipeInfo because it +# is introduced by 'hob'. Users could also introduce other +# RecipeInfo or simply use those already defined RecipeInfo. +# In the following patch, this newly defined new extra RecipeInfo +# will be dynamically loaded and used for loading/saving the extra +# cache fields + +# Copyright (C) 2011, Intel Corporation. All rights reserved. + +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ +from bb.cache import RecipeInfoCommon + +class HobRecipeInfo(RecipeInfoCommon): + __slots__ = () + + classname = "HobRecipeInfo" + # please override this member with the correct data cache file + # such as (bb_cache.dat, bb_extracache_hob.dat) + cachefile = "bb_extracache_" + classname +".dat" + + # override this member with the list of extra cache fields + # that this class will provide + cachefields = ['summary', 'license', 'section', + 'description', 'homepage', 'bugtracker', + 'prevision', 'files_info'] + + def __init__(self, filename, metadata): + + self.summary = self.getvar('SUMMARY', metadata) + self.license = self.getvar('LICENSE', metadata) + self.section = self.getvar('SECTION', metadata) + self.description = self.getvar('DESCRIPTION', metadata) + self.homepage = self.getvar('HOMEPAGE', metadata) + self.bugtracker = self.getvar('BUGTRACKER', metadata) + self.prevision = self.getvar('PR', metadata) + self.files_info = self.getvar('FILES_INFO', metadata) + + @classmethod + def init_cacheData(cls, cachedata): + # CacheData in Hob RecipeInfo Class + cachedata.summary = {} + cachedata.license = {} + cachedata.section = {} + cachedata.description = {} + cachedata.homepage = {} + cachedata.bugtracker = {} + cachedata.prevision = {} + cachedata.files_info = {} + + def add_cacheData(self, cachedata, fn): + cachedata.summary[fn] = self.summary + cachedata.license[fn] = self.license + cachedata.section[fn] = self.section + cachedata.description[fn] = self.description + cachedata.homepage[fn] = self.homepage + cachedata.bugtracker[fn] = self.bugtracker + cachedata.prevision[fn] = self.prevision + cachedata.files_info[fn] = self.files_info diff --git a/bitbake/lib/bb/checksum.py b/bitbake/lib/bb/checksum.py new file mode 100644 index 0000000000..514ff0b1e6 --- /dev/null +++ b/bitbake/lib/bb/checksum.py @@ -0,0 +1,90 @@ +# Local file checksum cache implementation +# +# Copyright (C) 2012 Intel Corporation +# +# This program is free software; you can redistribute it 
and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +import os +import stat +import bb.utils +import logging +from bb.cache import MultiProcessCache + +logger = logging.getLogger("BitBake.Cache") + +try: + import cPickle as pickle +except ImportError: + import pickle + logger.info("Importing cPickle failed. " + "Falling back to a very slow implementation.") + + +# mtime cache (non-persistent) +# based upon the assumption that files do not change during bitbake run +class FileMtimeCache(object): + cache = {} + + def cached_mtime(self, f): + if f not in self.cache: + self.cache[f] = os.stat(f)[stat.ST_MTIME] + return self.cache[f] + + def cached_mtime_noerror(self, f): + if f not in self.cache: + try: + self.cache[f] = os.stat(f)[stat.ST_MTIME] + except OSError: + return 0 + return self.cache[f] + + def update_mtime(self, f): + self.cache[f] = os.stat(f)[stat.ST_MTIME] + return self.cache[f] + + def clear(self): + self.cache.clear() + +# Checksum + mtime cache (persistent) +class FileChecksumCache(MultiProcessCache): + cache_file_name = "local_file_checksum_cache.dat" + CACHE_VERSION = 1 + + def __init__(self): + self.mtime_cache = FileMtimeCache() + MultiProcessCache.__init__(self) + + def get_checksum(self, f): + entry = self.cachedata[0].get(f) + cmtime = self.mtime_cache.cached_mtime(f) + if entry: + (mtime, hashval) = entry + if cmtime == mtime: + return hashval + else: + bb.debug(2, "file %s changed mtime, recompute checksum" % 
f) + + hashval = bb.utils.md5_file(f) + self.cachedata_extras[0][f] = (cmtime, hashval) + return hashval + + def merge_data(self, source, dest): + for h in source[0]: + if h in dest: + (smtime, _) = source[0][h] + (dmtime, _) = dest[0][h] + if smtime > dmtime: + dest[0][h] = source[0][h] + else: + dest[0][h] = source[0][h] diff --git a/bitbake/lib/bb/codeparser.py b/bitbake/lib/bb/codeparser.py new file mode 100644 index 0000000000..a50b9f268a --- /dev/null +++ b/bitbake/lib/bb/codeparser.py @@ -0,0 +1,328 @@ +import ast +import codegen +import logging +import os.path +import bb.utils, bb.data +from itertools import chain +from pysh import pyshyacc, pyshlex, sherrors +from bb.cache import MultiProcessCache + + +logger = logging.getLogger('BitBake.CodeParser') + +try: + import cPickle as pickle +except ImportError: + import pickle + logger.info('Importing cPickle failed. Falling back to a very slow implementation.') + + +def check_indent(codestr): + """If the code is indented, add a top level piece of code to 'remove' the indentation""" + + i = 0 + while codestr[i] in ["\n", "\t", " "]: + i = i + 1 + + if i == 0: + return codestr + + if codestr[i-1] == "\t" or codestr[i-1] == " ": + return "if 1:\n" + codestr + + return codestr + + +class CodeParserCache(MultiProcessCache): + cache_file_name = "bb_codeparser.dat" + CACHE_VERSION = 4 + + def __init__(self): + MultiProcessCache.__init__(self) + self.pythoncache = self.cachedata[0] + self.shellcache = self.cachedata[1] + self.pythoncacheextras = self.cachedata_extras[0] + self.shellcacheextras = self.cachedata_extras[1] + + def init_cache(self, d): + MultiProcessCache.init_cache(self, d) + + # cachedata gets re-assigned in the parent + self.pythoncache = self.cachedata[0] + self.shellcache = self.cachedata[1] + + def compress_keys(self, data): + # When the dicts are originally created, python calls intern() on the set keys + # which significantly improves memory usage. 
Sadly the pickle/unpickle process + # doesn't call intern() on the keys and results in the same strings being duplicated + # in memory. This also means pickle will save the same string multiple times in + # the cache file. By interning the data here, the cache file shrinks dramatically + # meaning faster load times and the reloaded cache files also consume much less + # memory. This is worth any performance hit from this loops and the use of the + # intern() data storage. + # Python 3.x may behave better in this area + for h in data[0]: + data[0][h]["refs"] = self.internSet(data[0][h]["refs"]) + data[0][h]["execs"] = self.internSet(data[0][h]["execs"]) + for k in data[0][h]["contains"]: + data[0][h]["contains"][k] = self.internSet(data[0][h]["contains"][k]) + for h in data[1]: + data[1][h]["execs"] = self.internSet(data[1][h]["execs"]) + return + + def create_cachedata(self): + data = [{}, {}] + return data + +codeparsercache = CodeParserCache() + +def parser_cache_init(d): + codeparsercache.init_cache(d) + +def parser_cache_save(d): + codeparsercache.save_extras(d) + +def parser_cache_savemerge(d): + codeparsercache.save_merge(d) + +Logger = logging.getLoggerClass() +class BufferedLogger(Logger): + def __init__(self, name, level=0, target=None): + Logger.__init__(self, name) + self.setLevel(level) + self.buffer = [] + self.target = target + + def handle(self, record): + self.buffer.append(record) + + def flush(self): + for record in self.buffer: + self.target.handle(record) + self.buffer = [] + +class PythonParser(): + getvars = ("d.getVar", "bb.data.getVar", "data.getVar", "d.appendVar", "d.prependVar") + containsfuncs = ("bb.utils.contains", "base_contains", "oe.utils.contains") + execfuncs = ("bb.build.exec_func", "bb.build.exec_task") + + def warn(self, func, arg): + """Warn about calls of bitbake APIs which pass a non-literal + argument for the variable name, as we're not able to track such + a reference. 
+ """ + + try: + funcstr = codegen.to_source(func) + argstr = codegen.to_source(arg) + except TypeError: + self.log.debug(2, 'Failed to convert function and argument to source form') + else: + self.log.debug(1, self.unhandled_message % (funcstr, argstr)) + + def visit_Call(self, node): + name = self.called_node_name(node.func) + if name in self.getvars or name in self.containsfuncs: + if isinstance(node.args[0], ast.Str): + varname = node.args[0].s + if name in self.containsfuncs and isinstance(node.args[1], ast.Str): + if varname not in self.contains: + self.contains[varname] = set() + self.contains[varname].add(node.args[1].s) + else: + self.references.add(node.args[0].s) + else: + self.warn(node.func, node.args[0]) + elif name in self.execfuncs: + if isinstance(node.args[0], ast.Str): + self.var_execs.add(node.args[0].s) + else: + self.warn(node.func, node.args[0]) + elif name and isinstance(node.func, (ast.Name, ast.Attribute)): + self.execs.add(name) + + def called_node_name(self, node): + """Given a called node, return its original string form""" + components = [] + while node: + if isinstance(node, ast.Attribute): + components.append(node.attr) + node = node.value + elif isinstance(node, ast.Name): + components.append(node.id) + return '.'.join(reversed(components)) + else: + break + + def __init__(self, name, log): + self.var_execs = set() + self.contains = {} + self.execs = set() + self.references = set() + self.log = BufferedLogger('BitBake.Data.PythonParser', logging.DEBUG, log) + + self.unhandled_message = "in call of %s, argument '%s' is not a string literal" + self.unhandled_message = "while parsing %s, %s" % (name, self.unhandled_message) + + def parse_python(self, node): + h = hash(str(node)) + + if h in codeparsercache.pythoncache: + self.references = codeparsercache.pythoncache[h]["refs"] + self.execs = codeparsercache.pythoncache[h]["execs"] + self.contains = codeparsercache.pythoncache[h]["contains"] + return + + if h in 
codeparsercache.pythoncacheextras: + self.references = codeparsercache.pythoncacheextras[h]["refs"] + self.execs = codeparsercache.pythoncacheextras[h]["execs"] + self.contains = codeparsercache.pythoncacheextras[h]["contains"] + return + + code = compile(check_indent(str(node)), "", "exec", + ast.PyCF_ONLY_AST) + + for n in ast.walk(code): + if n.__class__.__name__ == "Call": + self.visit_Call(n) + + self.execs.update(self.var_execs) + + codeparsercache.pythoncacheextras[h] = {} + codeparsercache.pythoncacheextras[h]["refs"] = self.references + codeparsercache.pythoncacheextras[h]["execs"] = self.execs + codeparsercache.pythoncacheextras[h]["contains"] = self.contains + +class ShellParser(): + def __init__(self, name, log): + self.funcdefs = set() + self.allexecs = set() + self.execs = set() + self.log = BufferedLogger('BitBake.Data.%s' % name, logging.DEBUG, log) + self.unhandled_template = "unable to handle non-literal command '%s'" + self.unhandled_template = "while parsing %s, %s" % (name, self.unhandled_template) + + def parse_shell(self, value): + """Parse the supplied shell code in a string, returning the external + commands it executes. + """ + + h = hash(str(value)) + + if h in codeparsercache.shellcache: + self.execs = codeparsercache.shellcache[h]["execs"] + return self.execs + + if h in codeparsercache.shellcacheextras: + self.execs = codeparsercache.shellcacheextras[h]["execs"] + return self.execs + + try: + tokens, _ = pyshyacc.parse(value, eof=True, debug=False) + except pyshlex.NeedMore: + raise sherrors.ShellSyntaxError("Unexpected EOF") + + for token in tokens: + self.process_tokens(token) + self.execs = set(cmd for cmd in self.allexecs if cmd not in self.funcdefs) + + codeparsercache.shellcacheextras[h] = {} + codeparsercache.shellcacheextras[h]["execs"] = self.execs + + return self.execs + + def process_tokens(self, tokens): + """Process a supplied portion of the syntax tree as returned by + pyshyacc.parse. 
+ """ + + def function_definition(value): + self.funcdefs.add(value.name) + return [value.body], None + + def case_clause(value): + # Element 0 of each item in the case is the list of patterns, and + # Element 1 of each item in the case is the list of commands to be + # executed when that pattern matches. + words = chain(*[item[0] for item in value.items]) + cmds = chain(*[item[1] for item in value.items]) + return cmds, words + + def if_clause(value): + main = chain(value.cond, value.if_cmds) + rest = value.else_cmds + if isinstance(rest, tuple) and rest[0] == "elif": + return chain(main, if_clause(rest[1])) + else: + return chain(main, rest) + + def simple_command(value): + return None, chain(value.words, (assign[1] for assign in value.assigns)) + + token_handlers = { + "and_or": lambda x: ((x.left, x.right), None), + "async": lambda x: ([x], None), + "brace_group": lambda x: (x.cmds, None), + "for_clause": lambda x: (x.cmds, x.items), + "function_definition": function_definition, + "if_clause": lambda x: (if_clause(x), None), + "pipeline": lambda x: (x.commands, None), + "redirect_list": lambda x: ([x.cmd], None), + "subshell": lambda x: (x.cmds, None), + "while_clause": lambda x: (chain(x.condition, x.cmds), None), + "until_clause": lambda x: (chain(x.condition, x.cmds), None), + "simple_command": simple_command, + "case_clause": case_clause, + } + + for token in tokens: + name, value = token + try: + more_tokens, words = token_handlers[name](value) + except KeyError: + raise NotImplementedError("Unsupported token type " + name) + + if more_tokens: + self.process_tokens(more_tokens) + + if words: + self.process_words(words) + + def process_words(self, words): + """Process a set of 'words' in pyshyacc parlance, which includes + extraction of executed commands from $() blocks, as well as grabbing + the command name argument. 
+ """ + + words = list(words) + for word in list(words): + wtree = pyshlex.make_wordtree(word[1]) + for part in wtree: + if not isinstance(part, list): + continue + + if part[0] in ('`', '$('): + command = pyshlex.wordtree_as_string(part[1:-1]) + self.parse_shell(command) + + if word[0] in ("cmd_name", "cmd_word"): + if word in words: + words.remove(word) + + usetoken = False + for word in words: + if word[0] in ("cmd_name", "cmd_word") or \ + (usetoken and word[0] == "TOKEN"): + if "=" in word[1]: + usetoken = True + continue + + cmd = word[1] + if cmd.startswith("$"): + self.log.debug(1, self.unhandled_template % cmd) + elif cmd == "eval": + command = " ".join(word for _, word in words[1:]) + self.parse_shell(command) + else: + self.allexecs.add(cmd) + break diff --git a/bitbake/lib/bb/command.py b/bitbake/lib/bb/command.py new file mode 100644 index 0000000000..84fcdf9433 --- /dev/null +++ b/bitbake/lib/bb/command.py @@ -0,0 +1,444 @@ +""" +BitBake 'Command' module + +Provide an interface to interact with the bitbake server through 'commands' +""" + +# Copyright (C) 2006-2007 Richard Purdie +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +""" +The bitbake server takes 'commands' from its UI/commandline. +Commands are either synchronous or asynchronous. +Async commands return data to the client in the form of events. 
+Sync commands must only return data through the function return value +and must not trigger events, directly or indirectly. +Commands are queued in a CommandQueue +""" + +import bb.event +import bb.cooker + +class CommandCompleted(bb.event.Event): + pass + +class CommandExit(bb.event.Event): + def __init__(self, exitcode): + bb.event.Event.__init__(self) + self.exitcode = int(exitcode) + +class CommandFailed(CommandExit): + def __init__(self, message): + self.error = message + CommandExit.__init__(self, 1) + +class CommandError(Exception): + pass + +class Command: + """ + A queue of asynchronous commands for bitbake + """ + def __init__(self, cooker): + self.cooker = cooker + self.cmds_sync = CommandsSync() + self.cmds_async = CommandsAsync() + + # FIXME Add lock for this + self.currentAsyncCommand = None + + def runCommand(self, commandline, ro_only = False): + command = commandline.pop(0) + if hasattr(CommandsSync, command): + # Can run synchronous commands straight away + command_method = getattr(self.cmds_sync, command) + if ro_only: + if not hasattr(command_method, 'readonly') or False == getattr(command_method, 'readonly'): + return None, "Not able to execute not readonly commands in readonly mode" + try: + result = command_method(self, commandline) + except CommandError as exc: + return None, exc.args[0] + except Exception: + import traceback + return None, traceback.format_exc() + else: + return result, None + if self.currentAsyncCommand is not None: + return None, "Busy (%s in progress)" % self.currentAsyncCommand[0] + if command not in CommandsAsync.__dict__: + return None, "No such command" + self.currentAsyncCommand = (command, commandline) + self.cooker.configuration.server_register_idlecallback(self.cooker.runCommands, self.cooker) + return True, None + + def runAsyncCommand(self): + try: + if self.cooker.state == bb.cooker.state.error: + return False + if self.currentAsyncCommand is not None: + (command, options) = self.currentAsyncCommand + 
commandmethod = getattr(CommandsAsync, command) + needcache = getattr( commandmethod, "needcache" ) + if needcache and self.cooker.state != bb.cooker.state.running: + self.cooker.updateCache() + return True + else: + commandmethod(self.cmds_async, self, options) + return False + else: + return False + except KeyboardInterrupt as exc: + self.finishAsyncCommand("Interrupted") + return False + except SystemExit as exc: + arg = exc.args[0] + if isinstance(arg, basestring): + self.finishAsyncCommand(arg) + else: + self.finishAsyncCommand("Exited with %s" % arg) + return False + except Exception as exc: + import traceback + if isinstance(exc, bb.BBHandledException): + self.finishAsyncCommand("") + else: + self.finishAsyncCommand(traceback.format_exc()) + return False + + def finishAsyncCommand(self, msg=None, code=None): + if msg or msg == "": + bb.event.fire(CommandFailed(msg), self.cooker.event_data) + elif code: + bb.event.fire(CommandExit(code), self.cooker.event_data) + else: + bb.event.fire(CommandCompleted(), self.cooker.event_data) + self.currentAsyncCommand = None + self.cooker.finishcommand() + +class CommandsSync: + """ + A class of synchronous commands + These should run quickly so as not to hurt interactive performance. + These must not influence any running synchronous command. + """ + + def stateShutdown(self, command, params): + """ + Trigger cooker 'shutdown' mode + """ + command.cooker.shutdown(False) + + def stateForceShutdown(self, command, params): + """ + Stop the cooker + """ + command.cooker.shutdown(True) + + def getAllKeysWithFlags(self, command, params): + """ + Returns a dump of the global state. Call with + variable flags to be retrieved as params. 
+ """ + flaglist = params[0] + return command.cooker.getAllKeysWithFlags(flaglist) + getAllKeysWithFlags.readonly = True + + def getVariable(self, command, params): + """ + Read the value of a variable from data + """ + varname = params[0] + expand = True + if len(params) > 1: + expand = (params[1] == "True") + + return command.cooker.data.getVar(varname, expand) + getVariable.readonly = True + + def setVariable(self, command, params): + """ + Set the value of variable in data + """ + varname = params[0] + value = str(params[1]) + command.cooker.data.setVar(varname, value) + + def setConfig(self, command, params): + """ + Set the value of variable in configuration + """ + varname = params[0] + value = str(params[1]) + setattr(command.cooker.configuration, varname, value) + + def enableDataTracking(self, command, params): + """ + Enable history tracking for variables + """ + command.cooker.enableDataTracking() + + def disableDataTracking(self, command, params): + """ + Disable history tracking for variables + """ + command.cooker.disableDataTracking() + + def setPrePostConfFiles(self, command, params): + prefiles = params[0].split() + postfiles = params[1].split() + command.cooker.configuration.prefile = prefiles + command.cooker.configuration.postfile = postfiles + + def getCpuCount(self, command, params): + """ + Get the CPU count on the bitbake server + """ + return bb.utils.cpu_count() + getCpuCount.readonly = True + + def matchFile(self, command, params): + fMatch = params[0] + return command.cooker.matchFile(fMatch) + + def generateNewImage(self, command, params): + image = params[0] + base_image = params[1] + package_queue = params[2] + timestamp = params[3] + description = params[4] + return command.cooker.generateNewImage(image, base_image, + package_queue, timestamp, description) + + def ensureDir(self, command, params): + directory = params[0] + bb.utils.mkdirhier(directory) + + def setVarFile(self, command, params): + """ + Save a variable in a file; 
used for saving in a configuration file + """ + var = params[0] + val = params[1] + default_file = params[2] + op = params[3] + command.cooker.modifyConfigurationVar(var, val, default_file, op) + + def removeVarFile(self, command, params): + """ + Remove a variable declaration from a file + """ + var = params[0] + command.cooker.removeConfigurationVar(var) + + def createConfigFile(self, command, params): + """ + Create an extra configuration file + """ + name = params[0] + command.cooker.createConfigFile(name) + + def setEventMask(self, command, params): + handlerNum = params[0] + llevel = params[1] + debug_domains = params[2] + mask = params[3] + return bb.event.set_UIHmask(handlerNum, llevel, debug_domains, mask) + + def setFeatures(self, command, params): + """ + Set the cooker features to include the passed list of features + """ + features = params[0] + command.cooker.setFeatures(features) + + # although we change the internal state of the cooker, this is transparent since + # we always take and leave the cooker in state.initial + setFeatures.readonly = True + +class CommandsAsync: + """ + A class of asynchronous commands + These functions communicate via generated events. + Any function that requires metadata parsing should be here. 
+ """ + + def buildFile(self, command, params): + """ + Build a single specified .bb file + """ + bfile = params[0] + task = params[1] + + command.cooker.buildFile(bfile, task) + buildFile.needcache = False + + def buildTargets(self, command, params): + """ + Build a set of targets + """ + pkgs_to_build = params[0] + task = params[1] + + command.cooker.buildTargets(pkgs_to_build, task) + buildTargets.needcache = True + + def generateDepTreeEvent(self, command, params): + """ + Generate an event containing the dependency information + """ + pkgs_to_build = params[0] + task = params[1] + + command.cooker.generateDepTreeEvent(pkgs_to_build, task) + command.finishAsyncCommand() + generateDepTreeEvent.needcache = True + + def generateDotGraph(self, command, params): + """ + Dump dependency information to disk as .dot files + """ + pkgs_to_build = params[0] + task = params[1] + + command.cooker.generateDotGraphFiles(pkgs_to_build, task) + command.finishAsyncCommand() + generateDotGraph.needcache = True + + def generateTargetsTree(self, command, params): + """ + Generate a tree of buildable targets. + If klass is provided ensure all recipes that inherit the class are + included in the package list. + If pkg_list provided use that list (plus any extras brought in by + klass) rather than generating a tree for all packages. + """ + klass = params[0] + pkg_list = params[1] + + command.cooker.generateTargetsTree(klass, pkg_list) + command.finishAsyncCommand() + generateTargetsTree.needcache = True + + def findCoreBaseFiles(self, command, params): + """ + Find certain files in COREBASE directory. i.e. Layers + """ + subdir = params[0] + filename = params[1] + + command.cooker.findCoreBaseFiles(subdir, filename) + command.finishAsyncCommand() + findCoreBaseFiles.needcache = False + + def findConfigFiles(self, command, params): + """ + Find config files which provide appropriate values + for the passed configuration variable. i.e. 
MACHINE + """ + varname = params[0] + + command.cooker.findConfigFiles(varname) + command.finishAsyncCommand() + findConfigFiles.needcache = False + + def findFilesMatchingInDir(self, command, params): + """ + Find implementation files matching the specified pattern + in the requested subdirectory of a BBPATH + """ + pattern = params[0] + directory = params[1] + + command.cooker.findFilesMatchingInDir(pattern, directory) + command.finishAsyncCommand() + findFilesMatchingInDir.needcache = False + + def findConfigFilePath(self, command, params): + """ + Find the path of the requested configuration file + """ + configfile = params[0] + + command.cooker.findConfigFilePath(configfile) + command.finishAsyncCommand() + findConfigFilePath.needcache = False + + def showVersions(self, command, params): + """ + Show the currently selected versions + """ + command.cooker.showVersions() + command.finishAsyncCommand() + showVersions.needcache = True + + def showEnvironmentTarget(self, command, params): + """ + Print the environment of a target recipe + (needs the cache to work out which recipe to use) + """ + pkg = params[0] + + command.cooker.showEnvironment(None, pkg) + command.finishAsyncCommand() + showEnvironmentTarget.needcache = True + + def showEnvironment(self, command, params): + """ + Print the standard environment + or if specified the environment for a specified recipe + """ + bfile = params[0] + + command.cooker.showEnvironment(bfile) + command.finishAsyncCommand() + showEnvironment.needcache = False + + def parseFiles(self, command, params): + """ + Parse the .bb files + """ + command.cooker.updateCache() + command.finishAsyncCommand() + parseFiles.needcache = True + + def compareRevisions(self, command, params): + """ + Parse the .bb files + """ + if bb.fetch.fetcher_compare_revisions(command.cooker.data): + command.finishAsyncCommand(code=1) + else: + command.finishAsyncCommand() + compareRevisions.needcache = True + + def triggerEvent(self, command, params): + 
""" + Trigger a certain event + """ + event = params[0] + bb.event.fire(eval(event), command.cooker.data) + command.currentAsyncCommand = None + triggerEvent.needcache = False + + def resetCooker(self, command, params): + """ + Reset the cooker to its initial state, thus forcing a reparse for + any async command that has the needcache property set to True + """ + command.cooker.reset() + command.finishAsyncCommand() + resetCooker.needcache = False + diff --git a/bitbake/lib/bb/compat.py b/bitbake/lib/bb/compat.py new file mode 100644 index 0000000000..de1923d28a --- /dev/null +++ b/bitbake/lib/bb/compat.py @@ -0,0 +1,6 @@ +"""Code pulled from future python versions, here for compatibility""" + +from collections import MutableMapping, KeysView, ValuesView, ItemsView, OrderedDict +from functools import total_ordering + + diff --git a/bitbake/lib/bb/cooker.py b/bitbake/lib/bb/cooker.py new file mode 100644 index 0000000000..f44a08889a --- /dev/null +++ b/bitbake/lib/bb/cooker.py @@ -0,0 +1,1874 @@ +#!/usr/bin/env python +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +# +# Copyright (C) 2003, 2004 Chris Larson +# Copyright (C) 2003, 2004 Phil Blundell +# Copyright (C) 2003 - 2005 Michael 'Mickey' Lauer +# Copyright (C) 2005 Holger Hans Peter Freyther +# Copyright (C) 2005 ROAD GmbH +# Copyright (C) 2006 - 2007 Richard Purdie +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +from __future__ import print_function +import sys, os, glob, os.path, re, time +import atexit +import itertools +import logging +import multiprocessing +import sre_constants +import threading +from cStringIO import StringIO +from contextlib import closing +from functools import wraps +from collections import defaultdict +import bb, bb.exceptions, bb.command +from bb import utils, data, parse, event, cache, providers, taskdata, runqueue +import Queue +import signal +import prserv.serv + +logger = logging.getLogger("BitBake") +collectlog = logging.getLogger("BitBake.Collection") +buildlog = logging.getLogger("BitBake.Build") +parselog = logging.getLogger("BitBake.Parsing") +providerlog = logging.getLogger("BitBake.Provider") + +class NoSpecificMatch(bb.BBHandledException): + """ + Exception raised when no or multiple file matches are found + """ + +class NothingToBuild(Exception): + """ + Exception raised when there is nothing to build + """ + +class CollectionError(bb.BBHandledException): + """ + Exception raised when layer configuration is incorrect + """ + +class state: + initial, parsing, running, shutdown, forceshutdown, stopped, error = range(7) + + +class SkippedPackage: + def __init__(self, info = None, reason = None): + self.pn = None + self.skipreason = None + self.provides = None + self.rprovides = None + + if info: + self.pn = info.pn + self.skipreason = info.skipreason + self.provides = info.provides + self.rprovides = info.rprovides + elif reason: + self.skipreason = reason + + +class CookerFeatures(object): + _feature_list = [HOB_EXTRA_CACHES, SEND_DEPENDS_TREE, BASEDATASTORE_TRACKING, SEND_SANITYEVENTS] = range(4) + + def __init__(self): + self._features=set() + + def setFeature(self, f): + # validate we got a request for a feature we 
support + if f not in CookerFeatures._feature_list: + return + self._features.add(f) + + def __contains__(self, f): + return f in self._features + + def __iter__(self): + return self._features.__iter__() + + def next(self): + return self._features.next() + + +#============================================================================# +# BBCooker +#============================================================================# +class BBCooker: + """ + Manages one bitbake build run + """ + + def __init__(self, configuration, featureSet = []): + self.recipecache = None + self.skiplist = {} + self.featureset = CookerFeatures() + for f in featureSet: + self.featureset.setFeature(f) + + self.configuration = configuration + + self.initConfigurationData() + + # Take a lock so only one copy of bitbake can run against a given build + # directory at a time + lockfile = self.data.expand("${TOPDIR}/bitbake.lock") + self.lock = bb.utils.lockfile(lockfile, False, False) + if not self.lock: + bb.fatal("Only one copy of bitbake should be run against a build directory") + try: + self.lock.seek(0) + self.lock.truncate() + if len(configuration.interface) >= 2: + self.lock.write("%s:%s\n" % (configuration.interface[0], configuration.interface[1])); + self.lock.flush() + except: + pass + + # TOSTOP must not be set or our children will hang when they output + fd = sys.stdout.fileno() + if os.isatty(fd): + import termios + tcattr = termios.tcgetattr(fd) + if tcattr[3] & termios.TOSTOP: + buildlog.info("The terminal had the TOSTOP bit set, clearing...") + tcattr[3] = tcattr[3] & ~termios.TOSTOP + termios.tcsetattr(fd, termios.TCSANOW, tcattr) + + self.command = bb.command.Command(self) + self.state = state.initial + + self.parser = None + + signal.signal(signal.SIGTERM, self.sigterm_exception) + + def sigterm_exception(self, signum, stackframe): + bb.warn("Cooker recieved SIGTERM, shutting down...") + self.state = state.forceshutdown + + def setFeatures(self, features): + # we only accept 
a new feature set if we're in state initial, so we can reset without problems + if self.state != state.initial: + raise Exception("Illegal state for feature set change") + original_featureset = list(self.featureset) + for feature in features: + self.featureset.setFeature(feature) + bb.debug(1, "Features set %s (was %s)" % (original_featureset, list(self.featureset))) + if (original_featureset != list(self.featureset)): + self.reset() + + def initConfigurationData(self): + + self.state = state.initial + self.caches_array = [] + + if CookerFeatures.BASEDATASTORE_TRACKING in self.featureset: + self.enableDataTracking() + + all_extra_cache_names = [] + # We hardcode all known cache types in a single place, here. + if CookerFeatures.HOB_EXTRA_CACHES in self.featureset: + all_extra_cache_names.append("bb.cache_extra:HobRecipeInfo") + + caches_name_array = ['bb.cache:CoreRecipeInfo'] + all_extra_cache_names + + # At least CoreRecipeInfo will be loaded, so caches_array will never be empty! + # This is the entry point, no further check needed! + for var in caches_name_array: + try: + module_name, cache_name = var.split(':') + module = __import__(module_name, fromlist=(cache_name,)) + self.caches_array.append(getattr(module, cache_name)) + except ImportError as exc: + logger.critical("Unable to import extra RecipeInfo '%s' from '%s': %s" % (cache_name, module_name, exc)) + sys.exit("FATAL: Failed to import extra cache class '%s'." 
% cache_name) + + self.databuilder = bb.cookerdata.CookerDataBuilder(self.configuration, False) + self.databuilder.parseBaseConfiguration() + self.data = self.databuilder.data + self.data_hash = self.databuilder.data_hash + + # + # Special updated configuration we use for firing events + # + self.event_data = bb.data.createCopy(self.data) + bb.data.update_data(self.event_data) + bb.parse.init_parser(self.event_data) + + if CookerFeatures.BASEDATASTORE_TRACKING in self.featureset: + self.disableDataTracking() + + def enableDataTracking(self): + self.configuration.tracking = True + if hasattr(self, "data"): + self.data.enableTracking() + + def disableDataTracking(self): + self.configuration.tracking = False + if hasattr(self, "data"): + self.data.disableTracking() + + def modifyConfigurationVar(self, var, val, default_file, op): + if op == "append": + self.appendConfigurationVar(var, val, default_file) + elif op == "set": + self.saveConfigurationVar(var, val, default_file, "=") + elif op == "earlyAssign": + self.saveConfigurationVar(var, val, default_file, "?=") + + + def appendConfigurationVar(self, var, val, default_file): + #add append var operation to the end of default_file + default_file = bb.cookerdata.findConfigFile(default_file, self.data) + + total = "#added by hob" + total += "\n%s += \"%s\"\n" % (var, val) + + with open(default_file, 'a') as f: + f.write(total) + + #add to history + loginfo = {"op":append, "file":default_file, "line":total.count("\n")} + self.data.appendVar(var, val, **loginfo) + + def saveConfigurationVar(self, var, val, default_file, op): + + replaced = False + #do not save if nothing changed + if str(val) == self.data.getVar(var): + return + + conf_files = self.data.varhistory.get_variable_files(var) + + #format the value when it is a list + if isinstance(val, list): + listval = "" + for value in val: + listval += "%s " % value + val = listval + + topdir = self.data.getVar("TOPDIR") + + #comment or replace operations made on var + for 
conf_file in conf_files: + if topdir in conf_file: + with open(conf_file, 'r') as f: + contents = f.readlines() + + lines = self.data.varhistory.get_variable_lines(var, conf_file) + for line in lines: + total = "" + i = 0 + for c in contents: + total += c + i = i + 1 + if i==int(line): + end_index = len(total) + index = total.rfind(var, 0, end_index) + + begin_line = total.count("\n",0,index) + end_line = int(line) + + #check if the variable was saved before in the same way + #if true it replace the place where the variable was declared + #else it comments it + if contents[begin_line-1]== "#added by hob\n": + contents[begin_line] = "%s %s \"%s\"\n" % (var, op, val) + replaced = True + else: + for ii in range(begin_line, end_line): + contents[ii] = "#" + contents[ii] + + with open(conf_file, 'w') as f: + f.writelines(contents) + + if replaced == False: + #remove var from history + self.data.varhistory.del_var_history(var) + + #add var to the end of default_file + default_file = bb.cookerdata.findConfigFile(default_file, self.data) + + #add the variable on a single line, to be easy to replace the second time + total = "\n#added by hob" + total += "\n%s %s \"%s\"\n" % (var, op, val) + + with open(default_file, 'a') as f: + f.write(total) + + #add to history + loginfo = {"op":set, "file":default_file, "line":total.count("\n")} + self.data.setVar(var, val, **loginfo) + + def removeConfigurationVar(self, var): + conf_files = self.data.varhistory.get_variable_files(var) + topdir = self.data.getVar("TOPDIR") + + for conf_file in conf_files: + if topdir in conf_file: + with open(conf_file, 'r') as f: + contents = f.readlines() + + lines = self.data.varhistory.get_variable_lines(var, conf_file) + for line in lines: + total = "" + i = 0 + for c in contents: + total += c + i = i + 1 + if i==int(line): + end_index = len(total) + index = total.rfind(var, 0, end_index) + + begin_line = total.count("\n",0,index) + + #check if the variable was saved before in the same way + if 
contents[begin_line-1]== "#added by hob\n": + contents[begin_line-1] = contents[begin_line] = "\n" + else: + contents[begin_line] = "\n" + #remove var from history + self.data.varhistory.del_var_history(var, conf_file, line) + #remove variable + self.data.delVar(var) + + with open(conf_file, 'w') as f: + f.writelines(contents) + + def createConfigFile(self, name): + path = os.getcwd() + confpath = os.path.join(path, "conf", name) + open(confpath, 'w').close() + + def parseConfiguration(self): + # Set log file verbosity + verboselogs = bb.utils.to_boolean(self.data.getVar("BB_VERBOSE_LOGS", "0")) + if verboselogs: + bb.msg.loggerVerboseLogs = True + + # Change nice level if we're asked to + nice = self.data.getVar("BB_NICE_LEVEL", True) + if nice: + curnice = os.nice(0) + nice = int(nice) - curnice + buildlog.verbose("Renice to %s " % os.nice(nice)) + + if self.recipecache: + del self.recipecache + self.recipecache = bb.cache.CacheData(self.caches_array) + + self.handleCollections( self.data.getVar("BBFILE_COLLECTIONS", True) ) + + def runCommands(self, server, data, abort): + """ + Run any queued asynchronous command + This is done by the idle handler so it runs in true context rather than + tied to any UI. 
+ """ + + return self.command.runAsyncCommand() + + def showVersions(self): + + pkg_pn = self.recipecache.pkg_pn + (latest_versions, preferred_versions) = bb.providers.findProviders(self.data, self.recipecache, pkg_pn) + + logger.plain("%-35s %25s %25s", "Recipe Name", "Latest Version", "Preferred Version") + logger.plain("%-35s %25s %25s\n", "===========", "==============", "=================") + + for p in sorted(pkg_pn): + pref = preferred_versions[p] + latest = latest_versions[p] + + prefstr = pref[0][0] + ":" + pref[0][1] + '-' + pref[0][2] + lateststr = latest[0][0] + ":" + latest[0][1] + "-" + latest[0][2] + + if pref == latest: + prefstr = "" + + logger.plain("%-35s %25s %25s", p, lateststr, prefstr) + + def showEnvironment(self, buildfile = None, pkgs_to_build = []): + """ + Show the outer or per-package environment + """ + fn = None + envdata = None + + if buildfile: + # Parse the configuration here. We need to do it explicitly here since + # this showEnvironment() code path doesn't use the cache + self.parseConfiguration() + + fn, cls = bb.cache.Cache.virtualfn2realfn(buildfile) + fn = self.matchFile(fn) + fn = bb.cache.Cache.realfn2virtual(fn, cls) + elif len(pkgs_to_build) == 1: + ignore = self.data.getVar("ASSUME_PROVIDED", True) or "" + if pkgs_to_build[0] in set(ignore.split()): + bb.fatal("%s is in ASSUME_PROVIDED" % pkgs_to_build[0]) + + taskdata, runlist, pkgs_to_build = self.buildTaskData(pkgs_to_build, None, self.configuration.abort) + + targetid = taskdata.getbuild_id(pkgs_to_build[0]) + fnid = taskdata.build_targets[targetid][0] + fn = taskdata.fn_index[fnid] + else: + envdata = self.data + + if fn: + try: + envdata = bb.cache.Cache.loadDataFull(fn, self.collection.get_file_appends(fn), self.data) + except Exception as e: + parselog.exception("Unable to read %s", fn) + raise + + # Display history + with closing(StringIO()) as env: + self.data.inchistory.emit(env) + logger.plain(env.getvalue()) + + # emit variables and shell functions + 
data.update_data(envdata) + with closing(StringIO()) as env: + data.emit_env(env, envdata, True) + logger.plain(env.getvalue()) + + # emit the metadata which isnt valid shell + data.expandKeys(envdata) + for e in envdata.keys(): + if data.getVarFlag( e, 'python', envdata ): + logger.plain("\npython %s () {\n%s}\n", e, data.getVar(e, envdata, 1)) + + + def buildTaskData(self, pkgs_to_build, task, abort): + """ + Prepare a runqueue and taskdata object for iteration over pkgs_to_build + """ + bb.event.fire(bb.event.TreeDataPreparationStarted(), self.data) + + # A task of None means use the default task + if task is None: + task = self.configuration.cmd + + fulltargetlist = self.checkPackages(pkgs_to_build) + + localdata = data.createCopy(self.data) + bb.data.update_data(localdata) + bb.data.expandKeys(localdata) + taskdata = bb.taskdata.TaskData(abort, skiplist=self.skiplist) + + current = 0 + runlist = [] + for k in fulltargetlist: + ktask = task + if ":do_" in k: + k2 = k.split(":do_") + k = k2[0] + ktask = k2[1] + taskdata.add_provider(localdata, self.recipecache, k) + current += 1 + runlist.append([k, "do_%s" % ktask]) + bb.event.fire(bb.event.TreeDataPreparationProgress(current, len(fulltargetlist)), self.data) + taskdata.add_unresolved(localdata, self.recipecache) + bb.event.fire(bb.event.TreeDataPreparationCompleted(len(fulltargetlist)), self.data) + return taskdata, runlist, fulltargetlist + + def prepareTreeData(self, pkgs_to_build, task): + """ + Prepare a runqueue and taskdata object for iteration over pkgs_to_build + """ + + # We set abort to False here to prevent unbuildable targets raising + # an exception when we're just generating data + taskdata, runlist, pkgs_to_build = self.buildTaskData(pkgs_to_build, task, False) + + return runlist, taskdata + + ######## WARNING : this function requires cache_extra to be enabled ######## + + def generateTaskDepTreeData(self, pkgs_to_build, task): + """ + Create a dependency graph of pkgs_to_build including reverse 
dependency + information. + """ + runlist, taskdata = self.prepareTreeData(pkgs_to_build, task) + rq = bb.runqueue.RunQueue(self, self.data, self.recipecache, taskdata, runlist) + rq.rqdata.prepare() + return self.buildDependTree(rq, taskdata) + + + def buildDependTree(self, rq, taskdata): + seen_fnids = [] + depend_tree = {} + depend_tree["depends"] = {} + depend_tree["tdepends"] = {} + depend_tree["pn"] = {} + depend_tree["rdepends-pn"] = {} + depend_tree["packages"] = {} + depend_tree["rdepends-pkg"] = {} + depend_tree["rrecs-pkg"] = {} + depend_tree["layer-priorities"] = self.recipecache.bbfile_config_priorities + + for task in xrange(len(rq.rqdata.runq_fnid)): + taskname = rq.rqdata.runq_task[task] + fnid = rq.rqdata.runq_fnid[task] + fn = taskdata.fn_index[fnid] + pn = self.recipecache.pkg_fn[fn] + version = "%s:%s-%s" % self.recipecache.pkg_pepvpr[fn] + if pn not in depend_tree["pn"]: + depend_tree["pn"][pn] = {} + depend_tree["pn"][pn]["filename"] = fn + depend_tree["pn"][pn]["version"] = version + depend_tree["pn"][pn]["inherits"] = self.recipecache.inherits.get(fn, None) + + # if we have extra caches, list all attributes they bring in + extra_info = [] + for cache_class in self.caches_array: + if type(cache_class) is type and issubclass(cache_class, bb.cache.RecipeInfoCommon) and hasattr(cache_class, 'cachefields'): + cachefields = getattr(cache_class, 'cachefields', []) + extra_info = extra_info + cachefields + + # for all attributes stored, add them to the dependency tree + for ei in extra_info: + depend_tree["pn"][pn][ei] = vars(self.recipecache)[ei][fn] + + + for dep in rq.rqdata.runq_depends[task]: + depfn = taskdata.fn_index[rq.rqdata.runq_fnid[dep]] + deppn = self.recipecache.pkg_fn[depfn] + dotname = "%s.%s" % (pn, rq.rqdata.runq_task[task]) + if not dotname in depend_tree["tdepends"]: + depend_tree["tdepends"][dotname] = [] + depend_tree["tdepends"][dotname].append("%s.%s" % (deppn, rq.rqdata.runq_task[dep])) + if fnid not in seen_fnids: + 
seen_fnids.append(fnid) + packages = [] + + depend_tree["depends"][pn] = [] + for dep in taskdata.depids[fnid]: + depend_tree["depends"][pn].append(taskdata.build_names_index[dep]) + + depend_tree["rdepends-pn"][pn] = [] + for rdep in taskdata.rdepids[fnid]: + depend_tree["rdepends-pn"][pn].append(taskdata.run_names_index[rdep]) + + rdepends = self.recipecache.rundeps[fn] + for package in rdepends: + depend_tree["rdepends-pkg"][package] = [] + for rdepend in rdepends[package]: + depend_tree["rdepends-pkg"][package].append(rdepend) + packages.append(package) + + rrecs = self.recipecache.runrecs[fn] + for package in rrecs: + depend_tree["rrecs-pkg"][package] = [] + for rdepend in rrecs[package]: + depend_tree["rrecs-pkg"][package].append(rdepend) + if not package in packages: + packages.append(package) + + for package in packages: + if package not in depend_tree["packages"]: + depend_tree["packages"][package] = {} + depend_tree["packages"][package]["pn"] = pn + depend_tree["packages"][package]["filename"] = fn + depend_tree["packages"][package]["version"] = version + + return depend_tree + + ######## WARNING : this function requires cache_extra to be enabled ######## + def generatePkgDepTreeData(self, pkgs_to_build, task): + """ + Create a dependency tree of pkgs_to_build, returning the data. 
+ """ + _, taskdata = self.prepareTreeData(pkgs_to_build, task) + tasks_fnid = [] + if len(taskdata.tasks_name) != 0: + for task in xrange(len(taskdata.tasks_name)): + tasks_fnid.append(taskdata.tasks_fnid[task]) + + seen_fnids = [] + depend_tree = {} + depend_tree["depends"] = {} + depend_tree["pn"] = {} + depend_tree["rdepends-pn"] = {} + depend_tree["rdepends-pkg"] = {} + depend_tree["rrecs-pkg"] = {} + + # if we have extra caches, list all attributes they bring in + extra_info = [] + for cache_class in self.caches_array: + if type(cache_class) is type and issubclass(cache_class, bb.cache.RecipeInfoCommon) and hasattr(cache_class, 'cachefields'): + cachefields = getattr(cache_class, 'cachefields', []) + extra_info = extra_info + cachefields + + for task in xrange(len(tasks_fnid)): + fnid = tasks_fnid[task] + fn = taskdata.fn_index[fnid] + pn = self.recipecache.pkg_fn[fn] + + if pn not in depend_tree["pn"]: + depend_tree["pn"][pn] = {} + depend_tree["pn"][pn]["filename"] = fn + version = "%s:%s-%s" % self.recipecache.pkg_pepvpr[fn] + depend_tree["pn"][pn]["version"] = version + rdepends = self.recipecache.rundeps[fn] + rrecs = self.recipecache.runrecs[fn] + depend_tree["pn"][pn]["inherits"] = self.recipecache.inherits.get(fn, None) + + # for all extra attributes stored, add them to the dependency tree + for ei in extra_info: + depend_tree["pn"][pn][ei] = vars(self.recipecache)[ei][fn] + + if fnid not in seen_fnids: + seen_fnids.append(fnid) + + depend_tree["depends"][pn] = [] + for dep in taskdata.depids[fnid]: + item = taskdata.build_names_index[dep] + pn_provider = "" + targetid = taskdata.getbuild_id(item) + if targetid in taskdata.build_targets and taskdata.build_targets[targetid]: + id = taskdata.build_targets[targetid][0] + fn_provider = taskdata.fn_index[id] + pn_provider = self.recipecache.pkg_fn[fn_provider] + else: + pn_provider = item + depend_tree["depends"][pn].append(pn_provider) + + depend_tree["rdepends-pn"][pn] = [] + for rdep in 
taskdata.rdepids[fnid]: + item = taskdata.run_names_index[rdep] + pn_rprovider = "" + targetid = taskdata.getrun_id(item) + if targetid in taskdata.run_targets and taskdata.run_targets[targetid]: + id = taskdata.run_targets[targetid][0] + fn_rprovider = taskdata.fn_index[id] + pn_rprovider = self.recipecache.pkg_fn[fn_rprovider] + else: + pn_rprovider = item + depend_tree["rdepends-pn"][pn].append(pn_rprovider) + + depend_tree["rdepends-pkg"].update(rdepends) + depend_tree["rrecs-pkg"].update(rrecs) + + return depend_tree + + def generateDepTreeEvent(self, pkgs_to_build, task): + """ + Create a task dependency graph of pkgs_to_build. + Generate an event with the result + """ + depgraph = self.generateTaskDepTreeData(pkgs_to_build, task) + bb.event.fire(bb.event.DepTreeGenerated(depgraph), self.data) + + def generateDotGraphFiles(self, pkgs_to_build, task): + """ + Create a task dependency graph of pkgs_to_build. + Save the result to a set of .dot files. + """ + + depgraph = self.generateTaskDepTreeData(pkgs_to_build, task) + + # Prints a flattened form of package-depends below where subpackages of a package are merged into the main pn + depends_file = file('pn-depends.dot', 'w' ) + buildlist_file = file('pn-buildlist', 'w' ) + print("digraph depends {", file=depends_file) + for pn in depgraph["pn"]: + fn = depgraph["pn"][pn]["filename"] + version = depgraph["pn"][pn]["version"] + print('"%s" [label="%s %s\\n%s"]' % (pn, pn, version, fn), file=depends_file) + print("%s" % pn, file=buildlist_file) + buildlist_file.close() + logger.info("PN build list saved to 'pn-buildlist'") + for pn in depgraph["depends"]: + for depend in depgraph["depends"][pn]: + print('"%s" -> "%s"' % (pn, depend), file=depends_file) + for pn in depgraph["rdepends-pn"]: + for rdepend in depgraph["rdepends-pn"][pn]: + print('"%s" -> "%s" [style=dashed]' % (pn, rdepend), file=depends_file) + print("}", file=depends_file) + logger.info("PN dependencies saved to 'pn-depends.dot'") + + depends_file = 
file('package-depends.dot', 'w' ) + print("digraph depends {", file=depends_file) + for package in depgraph["packages"]: + pn = depgraph["packages"][package]["pn"] + fn = depgraph["packages"][package]["filename"] + version = depgraph["packages"][package]["version"] + if package == pn: + print('"%s" [label="%s %s\\n%s"]' % (pn, pn, version, fn), file=depends_file) + else: + print('"%s" [label="%s(%s) %s\\n%s"]' % (package, package, pn, version, fn), file=depends_file) + for depend in depgraph["depends"][pn]: + print('"%s" -> "%s"' % (package, depend), file=depends_file) + for package in depgraph["rdepends-pkg"]: + for rdepend in depgraph["rdepends-pkg"][package]: + print('"%s" -> "%s" [style=dashed]' % (package, rdepend), file=depends_file) + for package in depgraph["rrecs-pkg"]: + for rdepend in depgraph["rrecs-pkg"][package]: + print('"%s" -> "%s" [style=dashed]' % (package, rdepend), file=depends_file) + print("}", file=depends_file) + logger.info("Package dependencies saved to 'package-depends.dot'") + + tdepends_file = file('task-depends.dot', 'w' ) + print("digraph depends {", file=tdepends_file) + for task in depgraph["tdepends"]: + (pn, taskname) = task.rsplit(".", 1) + fn = depgraph["pn"][pn]["filename"] + version = depgraph["pn"][pn]["version"] + print('"%s.%s" [label="%s %s\\n%s\\n%s"]' % (pn, taskname, pn, taskname, version, fn), file=tdepends_file) + for dep in depgraph["tdepends"][task]: + print('"%s" -> "%s"' % (task, dep), file=tdepends_file) + print("}", file=tdepends_file) + logger.info("Task dependencies saved to 'task-depends.dot'") + + def show_appends_with_no_recipes( self ): + appends_without_recipes = [self.collection.appendlist[recipe] + for recipe in self.collection.appendlist + if recipe not in self.collection.appliedappendlist] + if appends_without_recipes: + appendlines = (' %s' % append + for appends in appends_without_recipes + for append in appends) + msg = 'No recipes available for:\n%s' % '\n'.join(appendlines) + warn_only = 
data.getVar("BB_DANGLINGAPPENDS_WARNONLY", \ + self.data, False) or "no" + if warn_only.lower() in ("1", "yes", "true"): + bb.warn(msg) + else: + bb.fatal(msg) + + def handlePrefProviders(self): + + localdata = data.createCopy(self.data) + bb.data.update_data(localdata) + bb.data.expandKeys(localdata) + + # Handle PREFERRED_PROVIDERS + for p in (localdata.getVar('PREFERRED_PROVIDERS', True) or "").split(): + try: + (providee, provider) = p.split(':') + except: + providerlog.critical("Malformed option in PREFERRED_PROVIDERS variable: %s" % p) + continue + if providee in self.recipecache.preferred and self.recipecache.preferred[providee] != provider: + providerlog.error("conflicting preferences for %s: both %s and %s specified", providee, provider, self.recipecache.preferred[providee]) + self.recipecache.preferred[providee] = provider + + def findCoreBaseFiles(self, subdir, configfile): + corebase = self.data.getVar('COREBASE', True) or "" + paths = [] + for root, dirs, files in os.walk(corebase + '/' + subdir): + for d in dirs: + configfilepath = os.path.join(root, d, configfile) + if os.path.exists(configfilepath): + paths.append(os.path.join(root, d)) + + if paths: + bb.event.fire(bb.event.CoreBaseFilesFound(paths), self.data) + + def findConfigFilePath(self, configfile): + """ + Find the location on disk of configfile and if it exists and was parsed by BitBake + emit the ConfigFilePathFound event with the path to the file. + """ + path = bb.cookerdata.findConfigFile(configfile, self.data) + if not path: + return + + # Generate a list of parsed configuration files by searching the files + # listed in the __depends and __base_depends variables with a .conf suffix. 
+ conffiles = [] + dep_files = self.data.getVar('__base_depends') or [] + dep_files = dep_files + (self.data.getVar('__depends') or []) + + for f in dep_files: + if f[0].endswith(".conf"): + conffiles.append(f[0]) + + _, conf, conffile = path.rpartition("conf/") + match = os.path.join(conf, conffile) + # Try and find matches for conf/conffilename.conf as we don't always + # have the full path to the file. + for cfg in conffiles: + if cfg.endswith(match): + bb.event.fire(bb.event.ConfigFilePathFound(path), + self.data) + break + + def findFilesMatchingInDir(self, filepattern, directory): + """ + Searches for files matching the regex 'pattern' which are children of + 'directory' in each BBPATH. i.e. to find all rootfs package classes available + to BitBake one could call findFilesMatchingInDir(self, 'rootfs_', 'classes') + or to find all machine configuration files one could call: + findFilesMatchingInDir(self, 'conf/machines', 'conf') + """ + import re + + matches = [] + p = re.compile(re.escape(filepattern)) + bbpaths = self.data.getVar('BBPATH', True).split(':') + for path in bbpaths: + dirpath = os.path.join(path, directory) + if os.path.exists(dirpath): + for root, dirs, files in os.walk(dirpath): + for f in files: + if p.search(f): + matches.append(f) + + if matches: + bb.event.fire(bb.event.FilesMatchingFound(filepattern, matches), self.data) + + def findConfigFiles(self, varname): + """ + Find config files which are appropriate values for varname. + i.e. 
MACHINE, DISTRO + """ + possible = [] + var = varname.lower() + + data = self.data + # iterate configs + bbpaths = data.getVar('BBPATH', True).split(':') + for path in bbpaths: + confpath = os.path.join(path, "conf", var) + if os.path.exists(confpath): + for root, dirs, files in os.walk(confpath): + # get all child files, these are appropriate values + for f in files: + val, sep, end = f.rpartition('.') + if end == 'conf': + possible.append(val) + + if possible: + bb.event.fire(bb.event.ConfigFilesFound(var, possible), self.data) + + def findInheritsClass(self, klass): + """ + Find all recipes which inherit the specified class + """ + pkg_list = [] + + for pfn in self.recipecache.pkg_fn: + inherits = self.recipecache.inherits.get(pfn, None) + if inherits and inherits.count(klass) > 0: + pkg_list.append(self.recipecache.pkg_fn[pfn]) + + return pkg_list + + def generateTargetsTree(self, klass=None, pkgs=[]): + """ + Generate a dependency tree of buildable targets + Generate an event with the result + """ + # if the caller hasn't specified a pkgs list default to universe + if not len(pkgs): + pkgs = ['universe'] + # if inherited_class passed ensure all recipes which inherit the + # specified class are included in pkgs + if klass: + extra_pkgs = self.findInheritsClass(klass) + pkgs = pkgs + extra_pkgs + + # generate a dependency tree for all our packages + tree = self.generatePkgDepTreeData(pkgs, 'build') + bb.event.fire(bb.event.TargetsTreeGenerated(tree), self.data) + + def buildWorldTargetList(self): + """ + Build package list for "bitbake world" + """ + parselog.debug(1, "collating packages for \"world\"") + for f in self.recipecache.possible_world: + terminal = True + pn = self.recipecache.pkg_fn[f] + + for p in self.recipecache.pn_provides[pn]: + if p.startswith('virtual/'): + parselog.debug(2, "World build skipping %s due to %s provider starting with virtual/", f, p) + terminal = False + break + for pf in self.recipecache.providers[p]: + if 
self.recipecache.pkg_fn[pf] != pn: + parselog.debug(2, "World build skipping %s due to both us and %s providing %s", f, pf, p) + terminal = False + break + if terminal: + self.recipecache.world_target.add(pn) + + def interactiveMode( self ): + """Drop off into a shell""" + try: + from bb import shell + except ImportError: + parselog.exception("Interactive mode not available") + sys.exit(1) + else: + shell.start( self ) + + + def handleCollections( self, collections ): + """Handle collections""" + errors = False + self.recipecache.bbfile_config_priorities = [] + if collections: + collection_priorities = {} + collection_depends = {} + collection_list = collections.split() + min_prio = 0 + for c in collection_list: + # Get collection priority if defined explicitly + priority = self.data.getVar("BBFILE_PRIORITY_%s" % c, True) + if priority: + try: + prio = int(priority) + except ValueError: + parselog.error("invalid value for BBFILE_PRIORITY_%s: \"%s\"", c, priority) + errors = True + if min_prio == 0 or prio < min_prio: + min_prio = prio + collection_priorities[c] = prio + else: + collection_priorities[c] = None + + # Check dependencies and store information for priority calculation + deps = self.data.getVar("LAYERDEPENDS_%s" % c, True) + if deps: + depnamelist = [] + deplist = deps.split() + for dep in deplist: + depsplit = dep.split(':') + if len(depsplit) > 1: + try: + depver = int(depsplit[1]) + except ValueError: + parselog.error("invalid version value in LAYERDEPENDS_%s: \"%s\"", c, dep) + errors = True + continue + else: + depver = None + dep = depsplit[0] + depnamelist.append(dep) + + if dep in collection_list: + if depver: + layerver = self.data.getVar("LAYERVERSION_%s" % dep, True) + if layerver: + try: + lver = int(layerver) + except ValueError: + parselog.error("invalid value for LAYERVERSION_%s: \"%s\"", c, layerver) + errors = True + continue + if lver != depver: + parselog.error("Layer '%s' depends on version %d of layer '%s', but version %d is enabled 
in your configuration", c, depver, dep, lver) + errors = True + else: + parselog.error("Layer '%s' depends on version %d of layer '%s', which exists in your configuration but does not specify a version", c, depver, dep) + errors = True + else: + parselog.error("Layer '%s' depends on layer '%s', but this layer is not enabled in your configuration", c, dep) + errors = True + collection_depends[c] = depnamelist + else: + collection_depends[c] = [] + + # Recursively work out collection priorities based on dependencies + def calc_layer_priority(collection): + if not collection_priorities[collection]: + max_depprio = min_prio + for dep in collection_depends[collection]: + calc_layer_priority(dep) + depprio = collection_priorities[dep] + if depprio > max_depprio: + max_depprio = depprio + max_depprio += 1 + parselog.debug(1, "Calculated priority of layer %s as %d", collection, max_depprio) + collection_priorities[collection] = max_depprio + + # Calculate all layer priorities using calc_layer_priority and store in bbfile_config_priorities + for c in collection_list: + calc_layer_priority(c) + regex = self.data.getVar("BBFILE_PATTERN_%s" % c, True) + if regex == None: + parselog.error("BBFILE_PATTERN_%s not defined" % c) + errors = True + continue + try: + cre = re.compile(regex) + except re.error: + parselog.error("BBFILE_PATTERN_%s \"%s\" is not a valid regular expression", c, regex) + errors = True + continue + self.recipecache.bbfile_config_priorities.append((c, regex, cre, collection_priorities[c])) + if errors: + # We've already printed the actual error(s) + raise CollectionError("Errors during parsing layer configuration") + + def buildSetVars(self): + """ + Setup any variables needed before starting a build + """ + if not self.data.getVar("BUILDNAME"): + self.data.setVar("BUILDNAME", time.strftime('%Y%m%d%H%M')) + self.data.setVar("BUILDSTART", time.strftime('%m/%d/%Y %H:%M:%S', time.gmtime())) + + def matchFiles(self, bf): + """ + Find the .bb files which match the 
expression in 'buildfile'. + """ + if bf.startswith("/") or bf.startswith("../"): + bf = os.path.abspath(bf) + + self.collection = CookerCollectFiles(self.recipecache.bbfile_config_priorities) + filelist, masked = self.collection.collect_bbfiles(self.data, self.event_data) + try: + os.stat(bf) + bf = os.path.abspath(bf) + return [bf] + except OSError: + regexp = re.compile(bf) + matches = [] + for f in filelist: + if regexp.search(f) and os.path.isfile(f): + matches.append(f) + return matches + + def matchFile(self, buildfile): + """ + Find the .bb file which matches the expression in 'buildfile'. + Raise an error if multiple files + """ + matches = self.matchFiles(buildfile) + if len(matches) != 1: + if matches: + msg = "Unable to match '%s' to a specific recipe file - %s matches found:" % (buildfile, len(matches)) + if matches: + for f in matches: + msg += "\n %s" % f + parselog.error(msg) + else: + parselog.error("Unable to find any recipe file matching '%s'" % buildfile) + raise NoSpecificMatch + return matches[0] + + def buildFile(self, buildfile, task): + """ + Build the file matching regexp buildfile + """ + + # Too many people use -b because they think it's how you normally + # specify a target to be built, so show a warning + bb.warn("Buildfile specified, dependencies will not be handled. If this is not what you want, do not use -b / --buildfile.") + + # Parse the configuration here. 
We need to do it explicitly here since + # buildFile() doesn't use the cache + self.parseConfiguration() + + # If we are told to do the None task then query the default task + if (task == None): + task = self.configuration.cmd + + fn, cls = bb.cache.Cache.virtualfn2realfn(buildfile) + fn = self.matchFile(fn) + + self.buildSetVars() + + infos = bb.cache.Cache.parse(fn, self.collection.get_file_appends(fn), \ + self.data, + self.caches_array) + infos = dict(infos) + + fn = bb.cache.Cache.realfn2virtual(fn, cls) + try: + info_array = infos[fn] + except KeyError: + bb.fatal("%s does not exist" % fn) + + if info_array[0].skipped: + bb.fatal("%s was skipped: %s" % (fn, info_array[0].skipreason)) + + self.recipecache.add_from_recipeinfo(fn, info_array) + + # Tweak some variables + item = info_array[0].pn + self.recipecache.ignored_dependencies = set() + self.recipecache.bbfile_priority[fn] = 1 + + # Remove external dependencies + self.recipecache.task_deps[fn]['depends'] = {} + self.recipecache.deps[fn] = [] + self.recipecache.rundeps[fn] = [] + self.recipecache.runrecs[fn] = [] + + # Invalidate task for target if force mode active + if self.configuration.force: + logger.verbose("Invalidate task %s, %s", task, fn) + bb.parse.siggen.invalidate_task('do_%s' % task, self.recipecache, fn) + + # Setup taskdata structure + taskdata = bb.taskdata.TaskData(self.configuration.abort) + taskdata.add_provider(self.data, self.recipecache, item) + + buildname = self.data.getVar("BUILDNAME") + bb.event.fire(bb.event.BuildStarted(buildname, [item]), self.event_data) + + # Execute the runqueue + runlist = [[item, "do_%s" % task]] + + rq = bb.runqueue.RunQueue(self, self.data, self.recipecache, taskdata, runlist) + + def buildFileIdle(server, rq, abort): + + msg = None + if abort or self.state == state.forceshutdown: + rq.finish_runqueue(True) + msg = "Forced shutdown" + elif self.state == state.shutdown: + rq.finish_runqueue(False) + msg = "Stopped build" + failures = 0 + try: + retval = 
rq.execute_runqueue() + except runqueue.TaskFailure as exc: + failures += len(exc.args) + retval = False + except SystemExit as exc: + self.command.finishAsyncCommand() + return False + + if not retval: + bb.event.fire(bb.event.BuildCompleted(len(rq.rqdata.runq_fnid), buildname, item, failures), self.event_data) + self.command.finishAsyncCommand(msg) + return False + if retval is True: + return True + return retval + + self.configuration.server_register_idlecallback(buildFileIdle, rq) + + def buildTargets(self, targets, task): + """ + Attempt to build the targets specified + """ + + def buildTargetsIdle(server, rq, abort): + msg = None + if abort or self.state == state.forceshutdown: + rq.finish_runqueue(True) + msg = "Forced shutdown" + elif self.state == state.shutdown: + rq.finish_runqueue(False) + msg = "Stopped build" + failures = 0 + try: + retval = rq.execute_runqueue() + except runqueue.TaskFailure as exc: + failures += len(exc.args) + retval = False + except SystemExit as exc: + self.command.finishAsyncCommand() + return False + + if not retval: + bb.event.fire(bb.event.BuildCompleted(len(rq.rqdata.runq_fnid), buildname, targets, failures), self.data) + self.command.finishAsyncCommand(msg) + return False + if retval is True: + return True + return retval + + self.buildSetVars() + + taskdata, runlist, fulltargetlist = self.buildTaskData(targets, task, self.configuration.abort) + + buildname = self.data.getVar("BUILDNAME") + bb.event.fire(bb.event.BuildStarted(buildname, fulltargetlist), self.data) + + rq = bb.runqueue.RunQueue(self, self.data, self.recipecache, taskdata, runlist) + if 'universe' in targets: + rq.rqdata.warn_multi_bb = True + + self.configuration.server_register_idlecallback(buildTargetsIdle, rq) + + + def getAllKeysWithFlags(self, flaglist): + dump = {} + for k in self.data.keys(): + try: + v = self.data.getVar(k, True) + if not k.startswith("__") and not isinstance(v, bb.data_smart.DataSmart): + dump[k] = { + 'v' : v , + 'history' : 
self.data.varhistory.variable(k), + } + for d in flaglist: + dump[k][d] = self.data.getVarFlag(k, d) + except Exception as e: + print(e) + return dump + + + def generateNewImage(self, image, base_image, package_queue, timestamp, description): + ''' + Create a new image with a "require"/"inherit" base_image statement + ''' + import re + if timestamp: + image_name = os.path.splitext(image)[0] + timestr = time.strftime("-%Y%m%d-%H%M%S") + dest = image_name + str(timestr) + ".bb" + else: + if not image.endswith(".bb"): + dest = image + ".bb" + else: + dest = image + + basename = False + if base_image: + with open(base_image, 'r') as f: + require_line = f.readline() + p = re.compile("IMAGE_BASENAME *=") + for line in f: + if p.search(line): + basename = True + + with open(dest, "w") as imagefile: + if base_image is None: + imagefile.write("inherit core-image\n") + else: + topdir = self.data.getVar("TOPDIR") + if topdir in base_image: + base_image = require_line.split()[1] + imagefile.write("require " + base_image + "\n") + image_install = "IMAGE_INSTALL = \"" + for package in package_queue: + image_install += str(package) + " " + image_install += "\"\n" + imagefile.write(image_install) + + description_var = "DESCRIPTION = \"" + description + "\"\n" + imagefile.write(description_var) + + if basename: + # If this is overwritten in a inherited image, reset it to default + image_basename = "IMAGE_BASENAME = \"${PN}\"\n" + imagefile.write(image_basename) + + self.state = state.initial + if timestamp: + return timestr + + # This is called for all async commands when self.state != running + def updateCache(self): + if self.state == state.running: + return + + if self.state in (state.shutdown, state.forceshutdown): + if hasattr(self.parser, 'shutdown'): + self.parser.shutdown(clean=False, force = True) + raise bb.BBHandledException() + + if self.state != state.parsing: + self.parseConfiguration () + if CookerFeatures.SEND_SANITYEVENTS in self.featureset: + 
bb.event.fire(bb.event.SanityCheck(False), self.data) + + ignore = self.data.getVar("ASSUME_PROVIDED", True) or "" + self.recipecache.ignored_dependencies = set(ignore.split()) + + for dep in self.configuration.extra_assume_provided: + self.recipecache.ignored_dependencies.add(dep) + + self.collection = CookerCollectFiles(self.recipecache.bbfile_config_priorities) + (filelist, masked) = self.collection.collect_bbfiles(self.data, self.event_data) + + self.data.renameVar("__depends", "__base_depends") + + self.parser = CookerParser(self, filelist, masked) + self.state = state.parsing + + if not self.parser.parse_next(): + collectlog.debug(1, "parsing complete") + if self.parser.error: + raise bb.BBHandledException() + self.show_appends_with_no_recipes() + self.handlePrefProviders() + self.recipecache.bbfile_priority = self.collection.collection_priorities(self.recipecache.pkg_fn) + self.state = state.running + return None + + return True + + def checkPackages(self, pkgs_to_build): + + # Return a copy, don't modify the original + pkgs_to_build = pkgs_to_build[:] + + if len(pkgs_to_build) == 0: + raise NothingToBuild + + ignore = (self.data.getVar("ASSUME_PROVIDED", True) or "").split() + for pkg in pkgs_to_build: + if pkg in ignore: + parselog.warn("Explicit target \"%s\" is in ASSUME_PROVIDED, ignoring" % pkg) + + if 'world' in pkgs_to_build: + self.buildWorldTargetList() + pkgs_to_build.remove('world') + for t in self.recipecache.world_target: + pkgs_to_build.append(t) + + if 'universe' in pkgs_to_build: + parselog.warn("The \"universe\" target is only intended for testing and may produce errors.") + parselog.debug(1, "collating packages for \"universe\"") + pkgs_to_build.remove('universe') + for t in self.recipecache.universe_target: + pkgs_to_build.append(t) + + return pkgs_to_build + + + + + def pre_serve(self): + # Empty the environment. The environment will be populated as + # necessary from the data store. 
def server_main(cooker, func, *args):
    """Run `func(*args)` bracketed by cooker.pre_serve()/post_serve(),
    profiling the call when the cooker configuration asks for it.
    Returns whatever `func` returns."""
    cooker.pre_serve()

    if cooker.configuration.profile:
        try:
            import cProfile as profile
        except ImportError:
            # Fix: this was a bare `except:`, which would also swallow
            # KeyboardInterrupt/SystemExit; only a missing cProfile should
            # trigger the pure-python fallback.
            import profile
        prof = profile.Profile()

        ret = profile.Profile.runcall(prof, func, *args)

        prof.dump_stats("profile.log")
        bb.utils.process_profilelog("profile.log")
        print("Raw profiling information saved to profile.log and processed statistics to profile.log.processed")

    else:
        ret = func(*args)

    cooker.post_serve()

    return ret
+ found = [] + for dir, dirs, files in os.walk(path): + for ignored in ('SCCS', 'CVS', '.svn'): + if ignored in dirs: + dirs.remove(ignored) + found += [os.path.join(dir, f) for f in files if (f.endswith('.bb') or f.endswith('.bbappend'))] + + return found + + def collect_bbfiles(self, config, eventdata): + """Collect all available .bb build files""" + masked = 0 + + collectlog.debug(1, "collecting .bb files") + + files = (config.getVar( "BBFILES", True) or "").split() + config.setVar("BBFILES", " ".join(files)) + + # Sort files by priority + files.sort( key=lambda fileitem: self.calc_bbfile_priority(fileitem) ) + + if not len(files): + files = self.get_bbfiles() + + if not len(files): + collectlog.error("no recipe files to build, check your BBPATH and BBFILES?") + bb.event.fire(CookerExit(), eventdata) + + # Can't use set here as order is important + newfiles = [] + for f in files: + if os.path.isdir(f): + dirfiles = self.find_bbfiles(f) + for g in dirfiles: + if g not in newfiles: + newfiles.append(g) + else: + globbed = glob.glob(f) + if not globbed and os.path.exists(f): + globbed = [f] + for g in globbed: + if g not in newfiles: + newfiles.append(g) + + bbmask = config.getVar('BBMASK', True) + + if bbmask: + try: + bbmask_compiled = re.compile(bbmask) + except sre_constants.error: + collectlog.critical("BBMASK is not a valid regular expression, ignoring.") + return list(newfiles), 0 + + bbfiles = [] + bbappend = [] + for f in newfiles: + if bbmask and bbmask_compiled.search(f): + collectlog.debug(1, "skipping masked file %s", f) + masked += 1 + continue + if f.endswith('.bb'): + bbfiles.append(f) + elif f.endswith('.bbappend'): + bbappend.append(f) + else: + collectlog.debug(1, "skipping %s: unknown file extension", f) + + # Build a list of .bbappend files for each .bb file + for f in bbappend: + base = os.path.basename(f).replace('.bbappend', '.bb') + if not base in self.appendlist: + self.appendlist[base] = [] + if f not in self.appendlist[base]: + 
class ParsingFailure(Exception):
    """Wraps an exception raised while parsing a recipe, recording which
    recipe was being parsed when it happened."""

    def __init__(self, realexception, recipe):
        self.realexception = realexception
        self.recipe = recipe
        super(ParsingFailure, self).__init__(realexception, recipe)
class Parser(multiprocessing.Process):
    """Worker process that parses recipes from a jobs queue and pushes the
    results (or the exception that was raised) onto a results queue."""

    def __init__(self, jobs, results, quit, init, profile):
        self.jobs = jobs
        self.results = results
        self.quit = quit
        self.init = init
        multiprocessing.Process.__init__(self)
        # Snapshot the parent's context/handlers so parse() can restore a
        # clean state before each recipe.
        self.context = bb.utils.get_context().copy()
        self.handlers = bb.event.get_class_handlers().copy()
        self.profile = profile

    def run(self):
        """Process entry point; optionally wraps realrun() in a profiler."""
        if not self.profile:
            self.realrun()
            return

        try:
            import cProfile as profile
        except ImportError:
            # Fix: was a bare `except:`; only a missing cProfile should
            # trigger the pure-python fallback.
            import profile
        prof = profile.Profile()
        try:
            profile.Profile.runcall(prof, self.realrun)
        finally:
            logfile = "profile-parse-%s.log" % multiprocessing.current_process().name
            prof.dump_stats(logfile)
            bb.utils.process_profilelog(logfile)
            print("Raw profiling information saved to %s and processed statistics to %s.processed" % (logfile, logfile))

    def realrun(self):
        """Main loop: pull jobs, parse, push results, until quit or a None
        job sentinel arrives."""
        if self.init:
            self.init()

        # Results that could not be delivered (results queue full) are
        # retried before a new job is fetched.
        pending = []
        while True:
            try:
                self.quit.get_nowait()
            except Queue.Empty:
                pass
            else:
                self.results.cancel_join_thread()
                break

            if pending:
                result = pending.pop()
            else:
                try:
                    job = self.jobs.get(timeout=0.25)
                except Queue.Empty:
                    continue

                if job is None:
                    break
                result = self.parse(*job)

            try:
                self.results.put(result, timeout=0.25)
            except Queue.Full:
                pending.append(result)

    def parse(self, filename, appends, caches_array):
        """Parse one recipe; returns (True, infos-or-exception).

        NOTE(review): self.cfg is a class attribute installed by the pool
        initializer (see CookerParser.start), not set in __init__.
        """
        try:
            # Reset our environment and handlers to the original settings
            bb.utils.set_context(self.context.copy())
            bb.event.set_class_handlers(self.handlers.copy())
            return True, bb.cache.Cache.parse(filename, appends, self.cfg, caches_array)
        except Exception as exc:
            tb = sys.exc_info()[2]
            exc.recipe = filename
            exc.traceback = list(bb.exceptions.extract_traceback(tb, context=3))
            return True, exc
        # Need to turn BaseExceptions into Exceptions here so we gracefully shutdown
        # and for example a worker thread doesn't just exit on its own in response to
        # a SystemExit event for example.
        except BaseException as exc:
            return True, ParsingFailure(exc, filename)
multiprocessing.Queue(maxsize=1) + self.parser_quit = multiprocessing.Queue(maxsize=self.num_processes) + self.jobs = multiprocessing.Queue(maxsize=self.num_processes) + self.result_queue = multiprocessing.Queue() + self.feeder = Feeder(self.willparse, self.jobs, self.feeder_quit) + self.feeder.start() + for i in range(0, self.num_processes): + parser = Parser(self.jobs, self.result_queue, self.parser_quit, init, self.cooker.configuration.profile) + parser.start() + self.processes.append(parser) + + self.results = itertools.chain(self.results, self.parse_generator()) + + def shutdown(self, clean=True, force=False): + if not self.toparse: + return + if self.haveshutdown: + return + self.haveshutdown = True + + if clean: + event = bb.event.ParseCompleted(self.cached, self.parsed, + self.skipped, self.masked, + self.virtuals, self.error, + self.total) + + bb.event.fire(event, self.cfgdata) + self.feeder_quit.put(None) + for process in self.processes: + self.jobs.put(None) + else: + self.feeder_quit.put('cancel') + + self.parser_quit.cancel_join_thread() + for process in self.processes: + self.parser_quit.put(None) + + self.jobs.cancel_join_thread() + + for process in self.processes: + if force: + process.join(.1) + process.terminate() + else: + process.join() + self.feeder.join() + + sync = threading.Thread(target=self.bb_cache.sync) + sync.start() + multiprocessing.util.Finalize(None, sync.join, exitpriority=-100) + bb.codeparser.parser_cache_savemerge(self.cooker.data) + bb.fetch.fetcher_parse_done(self.cooker.data) + + def load_cached(self): + for filename, appends in self.fromcache: + cached, infos = self.bb_cache.load(filename, appends, self.cfgdata) + yield not cached, infos + + def parse_generator(self): + while True: + if self.parsed >= self.toparse: + break + + try: + result = self.result_queue.get(timeout=0.25) + except Queue.Empty: + pass + else: + value = result[1] + if isinstance(value, BaseException): + raise value + else: + yield result + + def 
parse_next(self): + result = [] + parsed = None + try: + parsed, result = self.results.next() + except StopIteration: + self.shutdown() + return False + except bb.BBHandledException as exc: + self.error += 1 + logger.error('Failed to parse recipe: %s' % exc.recipe) + self.shutdown(clean=False) + return False + except ParsingFailure as exc: + self.error += 1 + logger.error('Unable to parse %s: %s' % + (exc.recipe, bb.exceptions.to_string(exc.realexception))) + self.shutdown(clean=False) + return False + except bb.parse.ParseError as exc: + self.error += 1 + logger.error(str(exc)) + self.shutdown(clean=False) + return False + except bb.data_smart.ExpansionError as exc: + self.error += 1 + _, value, _ = sys.exc_info() + logger.error('ExpansionError during parsing %s: %s', value.recipe, str(exc)) + self.shutdown(clean=False) + return False + except SyntaxError as exc: + self.error += 1 + logger.error('Unable to parse %s', exc.recipe) + self.shutdown(clean=False) + return False + except Exception as exc: + self.error += 1 + etype, value, tb = sys.exc_info() + if hasattr(value, "recipe"): + logger.error('Unable to parse %s', value.recipe, + exc_info=(etype, value, exc.traceback)) + else: + # Most likely, an exception occurred during raising an exception + import traceback + logger.error('Exception during parse: %s' % traceback.format_exc()) + self.shutdown(clean=False) + return False + + self.current += 1 + self.virtuals += len(result) + if parsed: + self.parsed += 1 + if self.parsed % self.progress_chunk == 0: + bb.event.fire(bb.event.ParseProgress(self.parsed, self.toparse), + self.cfgdata) + else: + self.cached += 1 + + for virtualfn, info_array in result: + if info_array[0].skipped: + self.skipped += 1 + self.cooker.skiplist[virtualfn] = SkippedPackage(info_array[0]) + self.bb_cache.add_info(virtualfn, info_array, self.cooker.recipecache, + parsed=parsed) + return True + + def reparse(self, filename): + infos = self.bb_cache.parse(filename, + 
self.cooker.collection.get_file_appends(filename), + self.cfgdata, self.cooker.caches_array) + for vfn, info_array in infos: + self.cooker.recipecache.add_from_recipeinfo(vfn, info_array) diff --git a/bitbake/lib/bb/cookerdata.py b/bitbake/lib/bb/cookerdata.py new file mode 100644 index 0000000000..b9b9e16675 --- /dev/null +++ b/bitbake/lib/bb/cookerdata.py @@ -0,0 +1,305 @@ +#!/usr/bin/env python +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +# +# Copyright (C) 2003, 2004 Chris Larson +# Copyright (C) 2003, 2004 Phil Blundell +# Copyright (C) 2003 - 2005 Michael 'Mickey' Lauer +# Copyright (C) 2005 Holger Hans Peter Freyther +# Copyright (C) 2005 ROAD GmbH +# Copyright (C) 2006 Richard Purdie +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
class ConfigParameters(object):
    """Holds the options, targets and environment a UI collected from its
    command line; subclasses must implement parseCommandLine()."""

    def __init__(self):
        self.options, targets = self.parseCommandLine()
        self.environment = self.parseEnvironment()

        self.options.pkgs_to_build = targets or []

        # Variable-history tracking is only switched on for --environment
        self.options.tracking = False
        if getattr(self.options, "show_environment", False):
            self.options.tracking = True

        # Mirror every parsed option as an attribute on this object
        for key, val in self.options.__dict__.items():
            setattr(self, key, val)

    def parseCommandLine(self):
        raise Exception("Caller must implement commandline option parsing")

    def parseEnvironment(self):
        # Snapshot of the process environment
        return os.environ.copy()

    def updateFromServer(self, server):
        """Fill in cmd and pkgs_to_build from server-side defaults
        (BB_DEFAULT_TASK, BBPKGS) when the command line left them unset."""
        if not self.options.cmd:
            defaulttask, error = server.runCommand(["getVariable", "BB_DEFAULT_TASK"])
            if error:
                raise Exception("Unable to get the value of BB_DEFAULT_TASK from the server: %s" % error)
            self.options.cmd = defaulttask or "build"
            _, error = server.runCommand(["setConfig", "cmd", self.options.cmd])
            if error:
                raise Exception("Unable to set configuration option 'cmd' on the server: %s" % error)

        if not self.options.pkgs_to_build:
            bbpkgs, error = server.runCommand(["getVariable", "BBPKGS"])
            if error:
                raise Exception("Unable to get the value of BBPKGS from the server: %s" % error)
            if bbpkgs:
                self.options.pkgs_to_build.extend(bbpkgs.split())
+ elif len(self.options.pkgs_to_build) > 1: + action['msg'] = "Only one target can be used with the --environment option." + elif self.options.buildfile and len(self.options.pkgs_to_build) > 0: + action['msg'] = "No target should be used with the --environment and --buildfile options." + elif len(self.options.pkgs_to_build) > 0: + action['action'] = ["showEnvironmentTarget", self.options.pkgs_to_build] + else: + action['action'] = ["showEnvironment", self.options.buildfile] + elif self.options.buildfile is not None: + action['action'] = ["buildFile", self.options.buildfile, self.options.cmd] + elif self.options.revisions_changed: + action['action'] = ["compareRevisions"] + elif self.options.show_versions: + action['action'] = ["showVersions"] + elif self.options.parse_only: + action['action'] = ["parseFiles"] + elif self.options.dot_graph: + if self.options.pkgs_to_build: + action['action'] = ["generateDotGraph", self.options.pkgs_to_build, self.options.cmd] + else: + action['msg'] = "Please specify a package name for dependency graph generation." + else: + if self.options.pkgs_to_build: + action['action'] = ["buildTargets", self.options.pkgs_to_build, self.options.cmd] + else: + #action['msg'] = "Nothing to do. Use 'bitbake world' to build everything, or run 'bitbake --help' for usage information." 
+ action = None + self.options.initialaction = action + return action + +class CookerConfiguration(object): + """ + Manages build options and configurations for one run + """ + + def __init__(self): + self.debug_domains = [] + self.extra_assume_provided = [] + self.prefile = [] + self.postfile = [] + self.debug = 0 + self.cmd = None + self.abort = True + self.force = False + self.profile = False + self.nosetscene = False + self.invalidate_stamp = False + self.dump_signatures = [] + self.dry_run = False + self.tracking = False + self.interface = [] + + self.env = {} + + def setConfigParameters(self, parameters): + for key in self.__dict__.keys(): + if key in parameters.options.__dict__: + setattr(self, key, parameters.options.__dict__[key]) + self.env = parameters.environment.copy() + self.tracking = parameters.tracking + + def setServerRegIdleCallback(self, srcb): + self.server_register_idlecallback = srcb + + def __getstate__(self): + state = {} + for key in self.__dict__.keys(): + if key == "server_register_idlecallback": + state[key] = None + else: + state[key] = getattr(self, key) + return state + + def __setstate__(self,state): + for k in state: + setattr(self, k, state[k]) + + +def catch_parse_error(func): + """Exception handling bits for our parsing""" + @wraps(func) + def wrapped(fn, *args): + try: + return func(fn, *args) + except (IOError, bb.parse.ParseError, bb.data_smart.ExpansionError) as exc: + import traceback + parselog.critical( traceback.format_exc()) + parselog.critical("Unable to parse %s: %s" % (fn, exc)) + sys.exit(1) + return wrapped + +@catch_parse_error +def parse_config_file(fn, data, include=True): + return bb.parse.handle(fn, data, include) + +@catch_parse_error +def _inherit(bbclass, data): + bb.parse.BBHandler.inherit(bbclass, "configuration INHERITs", 0, data) + return data + +def findConfigFile(configfile, data): + search = [] + bbpath = data.getVar("BBPATH", True) + if bbpath: + for i in bbpath.split(":"): + 
search.append(os.path.join(i, "conf", configfile)) + path = os.getcwd() + while path != "/": + search.append(os.path.join(path, "conf", configfile)) + path, _ = os.path.split(path) + + for i in search: + if os.path.exists(i): + return i + + return None + +class CookerDataBuilder(object): + + def __init__(self, cookercfg, worker = False): + + self.prefiles = cookercfg.prefile + self.postfiles = cookercfg.postfile + self.tracking = cookercfg.tracking + + bb.utils.set_context(bb.utils.clean_context()) + bb.event.set_class_handlers(bb.event.clean_class_handlers()) + self.data = bb.data.init() + if self.tracking: + self.data.enableTracking() + + # Keep a datastore of the initial environment variables and their + # values from when BitBake was launched to enable child processes + # to use environment variables which have been cleaned from the + # BitBake processes env + self.savedenv = bb.data.init() + for k in cookercfg.env: + self.savedenv.setVar(k, cookercfg.env[k]) + + filtered_keys = bb.utils.approved_variables() + bb.data.inheritFromOS(self.data, self.savedenv, filtered_keys) + self.data.setVar("BB_ORIGENV", self.savedenv) + + if worker: + self.data.setVar("BB_WORKERCONTEXT", "1") + + def parseBaseConfiguration(self): + try: + self.parseConfigurationFiles(self.prefiles, self.postfiles) + except SyntaxError: + sys.exit(1) + except Exception: + logger.exception("Error parsing configuration files") + sys.exit(1) + + def _findLayerConf(self, data): + return findConfigFile("bblayers.conf", data) + + def parseConfigurationFiles(self, prefiles, postfiles): + data = self.data + bb.parse.init_parser(data) + + # Parse files for loading *before* bitbake.conf and any includes + for f in prefiles: + data = parse_config_file(f, data) + + layerconf = self._findLayerConf(data) + if layerconf: + parselog.debug(2, "Found bblayers.conf (%s)", layerconf) + # By definition bblayers.conf is in conf/ of TOPDIR. 
+ # We may have been called with cwd somewhere else so reset TOPDIR + data.setVar("TOPDIR", os.path.dirname(os.path.dirname(layerconf))) + data = parse_config_file(layerconf, data) + + layers = (data.getVar('BBLAYERS', True) or "").split() + + data = bb.data.createCopy(data) + for layer in layers: + parselog.debug(2, "Adding layer %s", layer) + data.setVar('LAYERDIR', layer) + data = parse_config_file(os.path.join(layer, "conf", "layer.conf"), data) + data.expandVarref('LAYERDIR') + + data.delVar('LAYERDIR') + + if not data.getVar("BBPATH", True): + msg = "The BBPATH variable is not set" + if not layerconf: + msg += (" and bitbake did not find a conf/bblayers.conf file in" + " the expected location.\nMaybe you accidentally" + " invoked bitbake from the wrong directory?") + raise SystemExit(msg) + + data = parse_config_file(os.path.join("conf", "bitbake.conf"), data) + + # Parse files for loading *after* bitbake.conf and any includes + for p in postfiles: + data = parse_config_file(p, data) + + # Handle any INHERITs and inherit the base class + bbclasses = ["base"] + (data.getVar('INHERIT', True) or "").split() + for bbclass in bbclasses: + data = _inherit(bbclass, data) + + # Nomally we only register event handlers at the end of parsing .bb files + # We register any handlers we've found so far here... 
+ for var in data.getVar('__BBHANDLERS') or []: + bb.event.register(var, data.getVar(var), (data.getVarFlag(var, "eventmask", True) or "").split()) + + if data.getVar("BB_WORKERCONTEXT", False) is None: + bb.fetch.fetcher_init(data) + bb.codeparser.parser_cache_init(data) + bb.event.fire(bb.event.ConfigParsed(), data) + + if data.getVar("BB_INVALIDCONF") is True: + data.setVar("BB_INVALIDCONF", False) + self.parseConfigurationFiles(self.prefiles, self.postfiles) + return + + bb.parse.init_parser(data) + data.setVar('BBINCLUDED',bb.parse.get_file_depends(data)) + self.data = data + self.data_hash = data.get_hash() + + + diff --git a/bitbake/lib/bb/daemonize.py b/bitbake/lib/bb/daemonize.py new file mode 100644 index 0000000000..f0714b3af6 --- /dev/null +++ b/bitbake/lib/bb/daemonize.py @@ -0,0 +1,190 @@ +""" +Python Deamonizing helper + +Configurable daemon behaviors: + + 1.) The current working directory set to the "/" directory. + 2.) The current file creation mode mask set to 0. + 3.) Close all open files (1024). + 4.) Redirect standard I/O streams to "/dev/null". + +A failed call to fork() now raises an exception. + +References: + 1) Advanced Programming in the Unix Environment: W. Richard Stevens + 2) Unix Programming Frequently Asked Questions: + http://www.erlenstar.demon.co.uk/unix/faq_toc.html + +Modified to allow a function to be daemonized and return for +bitbake use by Richard Purdie +""" + +__author__ = "Chad J. Schroeder" +__copyright__ = "Copyright (C) 2005 Chad J. Schroeder" +__version__ = "0.2" + +# Standard Python modules. +import os # Miscellaneous OS interfaces. +import sys # System-specific parameters and functions. + +# Default daemon parameters. +# File mode creation mask of the daemon. +# For BitBake's children, we do want to inherit the parent umask. +UMASK = None + +# Default maximum for the number of available file descriptors. +MAXFD = 1024 + +# The standard I/O file descriptors are redirected to /dev/null by default. 
+if (hasattr(os, "devnull")): + REDIRECT_TO = os.devnull +else: + REDIRECT_TO = "/dev/null" + +def createDaemon(function, logfile): + """ + Detach a process from the controlling terminal and run it in the + background as a daemon, returning control to the caller. + """ + + try: + # Fork a child process so the parent can exit. This returns control to + # the command-line or shell. It also guarantees that the child will not + # be a process group leader, since the child receives a new process ID + # and inherits the parent's process group ID. This step is required + # to insure that the next call to os.setsid is successful. + pid = os.fork() + except OSError as e: + raise Exception("%s [%d]" % (e.strerror, e.errno)) + + if (pid == 0): # The first child. + # To become the session leader of this new session and the process group + # leader of the new process group, we call os.setsid(). The process is + # also guaranteed not to have a controlling terminal. + os.setsid() + + # Is ignoring SIGHUP necessary? + # + # It's often suggested that the SIGHUP signal should be ignored before + # the second fork to avoid premature termination of the process. The + # reason is that when the first child terminates, all processes, e.g. + # the second child, in the orphaned group will be sent a SIGHUP. + # + # "However, as part of the session management system, there are exactly + # two cases where SIGHUP is sent on the death of a process: + # + # 1) When the process that dies is the session leader of a session that + # is attached to a terminal device, SIGHUP is sent to all processes + # in the foreground process group of that terminal device. + # 2) When the death of a process causes a process group to become + # orphaned, and one or more processes in the orphaned group are + # stopped, then SIGHUP and SIGCONT are sent to all members of the + # orphaned group." [2] + # + # The first case can be ignored since the child is guaranteed not to have + # a controlling terminal. 
The second case isn't so easy to dismiss. + # The process group is orphaned when the first child terminates and + # POSIX.1 requires that every STOPPED process in an orphaned process + # group be sent a SIGHUP signal followed by a SIGCONT signal. Since the + # second child is not STOPPED though, we can safely forego ignoring the + # SIGHUP signal. In any case, there are no ill-effects if it is ignored. + # + # import signal # Set handlers for asynchronous events. + # signal.signal(signal.SIGHUP, signal.SIG_IGN) + + try: + # Fork a second child and exit immediately to prevent zombies. This + # causes the second child process to be orphaned, making the init + # process responsible for its cleanup. And, since the first child is + # a session leader without a controlling terminal, it's possible for + # it to acquire one by opening a terminal in the future (System V- + # based systems). This second fork guarantees that the child is no + # longer a session leader, preventing the daemon from ever acquiring + # a controlling terminal. + pid = os.fork() # Fork a second child. + except OSError as e: + raise Exception("%s [%d]" % (e.strerror, e.errno)) + + if (pid == 0): # The second child. + # We probably don't want the file mode creation mask inherited from + # the parent, so we give the child complete control over permissions. + if UMASK is not None: + os.umask(UMASK) + else: + # Parent (the first child) of the second child. + os._exit(0) + else: + # exit() or _exit()? + # _exit is like exit(), but it doesn't call any functions registered + # with atexit (and on_exit) or any registered signal handlers. It also + # closes any open file descriptors. Using exit() may cause all stdio + # streams to be flushed twice and any temporary files may be unexpectedly + # removed. It's therefore recommended that child branches of a fork() + # and the parent branch(es) of a daemon use _exit(). + return + + # Close all open file descriptors. 
This prevents the child from keeping + # open any file descriptors inherited from the parent. There is a variety + # of methods to accomplish this task. Three are listed below. + # + # Try the system configuration variable, SC_OPEN_MAX, to obtain the maximum + # number of open file descriptors to close. If it doesn't exists, use + # the default value (configurable). + # + # try: + # maxfd = os.sysconf("SC_OPEN_MAX") + # except (AttributeError, ValueError): + # maxfd = MAXFD + # + # OR + # + # if (os.sysconf_names.has_key("SC_OPEN_MAX")): + # maxfd = os.sysconf("SC_OPEN_MAX") + # else: + # maxfd = MAXFD + # + # OR + # + # Use the getrlimit method to retrieve the maximum file descriptor number + # that can be opened by this process. If there is not limit on the + # resource, use the default value. + # + import resource # Resource usage information. + maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1] + if (maxfd == resource.RLIM_INFINITY): + maxfd = MAXFD + + # Iterate through and close all file descriptors. +# for fd in range(0, maxfd): +# try: +# os.close(fd) +# except OSError: # ERROR, fd wasn't open to begin with (ignored) +# pass + + # Redirect the standard I/O file descriptors to the specified file. Since + # the daemon has no controlling terminal, most daemons redirect stdin, + # stdout, and stderr to /dev/null. This is done to prevent side-effects + # from reads and writes to the standard I/O file descriptors. + + # This call to open is guaranteed to return the lowest file descriptor, + # which will be 0 (stdin), since it was closed above. +# os.open(REDIRECT_TO, os.O_RDWR) # standard input (0) + + # Duplicate standard input to standard output and standard error. 
+# os.dup2(0, 1) # standard output (1) +# os.dup2(0, 2) # standard error (2) + + + si = file('/dev/null', 'r') + so = file(logfile, 'w') + se = so + + + # Replace those fds with our own + os.dup2(si.fileno(), sys.stdin.fileno()) + os.dup2(so.fileno(), sys.stdout.fileno()) + os.dup2(se.fileno(), sys.stderr.fileno()) + + function() + + os._exit(0) diff --git a/bitbake/lib/bb/data.py b/bitbake/lib/bb/data.py new file mode 100644 index 0000000000..db938be1e6 --- /dev/null +++ b/bitbake/lib/bb/data.py @@ -0,0 +1,403 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +""" +BitBake 'Data' implementations + +Functions for interacting with the data structure used by the +BitBake build tools. + +The expandData and update_data are the most expensive +operations. At night the cookie monster came by and +suggested 'give me cookies on setting the variables and +things will work out'. Taking this suggestion into account +applying the skills from the not yet passed 'Entwurf und +Analyse von Algorithmen' lecture and the cookie +monster seems to be right. We will track setVar more carefully +to have faster update_data and expandKeys operations. + +This is a treade-off between speed and memory again but +the speed is more critical here. +""" + +# Copyright (C) 2003, 2004 Chris Larson +# Copyright (C) 2005 Holger Hans Peter Freyther +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +#Based on functions from the base bb module, Copyright 2003 Holger Schurig + +import sys, os, re +if sys.argv[0][-5:] == "pydoc": + path = os.path.dirname(os.path.dirname(sys.argv[1])) +else: + path = os.path.dirname(os.path.dirname(sys.argv[0])) +sys.path.insert(0, path) +from itertools import groupby + +from bb import data_smart +from bb import codeparser +import bb + +logger = data_smart.logger +_dict_type = data_smart.DataSmart + +def init(): + """Return a new object representing the Bitbake data""" + return _dict_type() + +def init_db(parent = None): + """Return a new object representing the Bitbake data, + optionally based on an existing object""" + if parent is not None: + return parent.createCopy() + else: + return _dict_type() + +def createCopy(source): + """Link the source set to the destination + If one does not find the value in the destination set, + search will go on to the source set to get the value. + Value from source are copy-on-write. i.e. any try to + modify one of them will end up putting the modified value + in the destination set. 
+ """ + return source.createCopy() + +def initVar(var, d): + """Non-destructive var init for data structure""" + d.initVar(var) + + +def setVar(var, value, d): + """Set a variable to a given value""" + d.setVar(var, value) + + +def getVar(var, d, exp = 0): + """Gets the value of a variable""" + return d.getVar(var, exp) + + +def renameVar(key, newkey, d): + """Renames a variable from key to newkey""" + d.renameVar(key, newkey) + +def delVar(var, d): + """Removes a variable from the data set""" + d.delVar(var) + +def appendVar(var, value, d): + """Append additional value to a variable""" + d.appendVar(var, value) + +def setVarFlag(var, flag, flagvalue, d): + """Set a flag for a given variable to a given value""" + d.setVarFlag(var, flag, flagvalue) + +def getVarFlag(var, flag, d): + """Gets given flag from given var""" + return d.getVarFlag(var, flag) + +def delVarFlag(var, flag, d): + """Removes a given flag from the variable's flags""" + d.delVarFlag(var, flag) + +def setVarFlags(var, flags, d): + """Set the flags for a given variable + + Note: + setVarFlags will not clear previous + flags. 
Think of this method as + addVarFlags + """ + d.setVarFlags(var, flags) + +def getVarFlags(var, d): + """Gets a variable's flags""" + return d.getVarFlags(var) + +def delVarFlags(var, d): + """Removes a variable's flags""" + d.delVarFlags(var) + +def keys(d): + """Return a list of keys in d""" + return d.keys() + + +__expand_var_regexp__ = re.compile(r"\${[^{}]+}") +__expand_python_regexp__ = re.compile(r"\${@.+?}") + +def expand(s, d, varname = None): + """Variable expansion using the data store""" + return d.expand(s, varname) + +def expandKeys(alterdata, readdata = None): + if readdata == None: + readdata = alterdata + + todolist = {} + for key in alterdata: + if not '${' in key: + continue + + ekey = expand(key, readdata) + if key == ekey: + continue + todolist[key] = ekey + + # These two for loops are split for performance to maximise the + # usefulness of the expand cache + + for key in todolist: + ekey = todolist[key] + newval = alterdata.getVar(ekey, 0) + if newval: + val = alterdata.getVar(key, 0) + if val is not None and newval is not None: + bb.warn("Variable key %s (%s) replaces original key %s (%s)." 
% (key, val, ekey, newval)) + alterdata.renameVar(key, ekey) + +def inheritFromOS(d, savedenv, permitted): + """Inherit variables from the initial environment.""" + exportlist = bb.utils.preserved_envvars_exported() + for s in savedenv.keys(): + if s in permitted: + try: + d.setVar(s, getVar(s, savedenv, True), op = 'from env') + if s in exportlist: + d.setVarFlag(s, "export", True, op = 'auto env export') + except TypeError: + pass + +def emit_var(var, o=sys.__stdout__, d = init(), all=False): + """Emit a variable to be sourced by a shell.""" + if getVarFlag(var, "python", d): + return 0 + + export = getVarFlag(var, "export", d) + unexport = getVarFlag(var, "unexport", d) + func = getVarFlag(var, "func", d) + if not all and not export and not unexport and not func: + return 0 + + try: + if all: + oval = getVar(var, d, 0) + val = getVar(var, d, 1) + except (KeyboardInterrupt, bb.build.FuncFailed): + raise + except Exception as exc: + o.write('# expansion of %s threw %s: %s\n' % (var, exc.__class__.__name__, str(exc))) + return 0 + + if all: + d.varhistory.emit(var, oval, val, o) + + if (var.find("-") != -1 or var.find(".") != -1 or var.find('{') != -1 or var.find('}') != -1 or var.find('+') != -1) and not all: + return 0 + + varExpanded = expand(var, d) + + if unexport: + o.write('unset %s\n' % varExpanded) + return 0 + + if val is None: + return 0 + + val = str(val) + + if func: + # NOTE: should probably check for unbalanced {} within the var + o.write("%s() {\n%s\n}\n" % (varExpanded, val)) + return 1 + + if export: + o.write('export ') + + # if we're going to output this within doublequotes, + # to a shell, we need to escape the quotes in the var + alter = re.sub('"', '\\"', val) + alter = re.sub('\n', ' \\\n', alter) + o.write('%s="%s"\n' % (varExpanded, alter)) + return 0 + +def emit_env(o=sys.__stdout__, d = init(), all=False): + """Emits all items in the data store in a format such that it can be sourced by a shell.""" + + isfunc = lambda key: 
bool(d.getVarFlag(key, "func")) + keys = sorted((key for key in d.keys() if not key.startswith("__")), key=isfunc) + grouped = groupby(keys, isfunc) + for isfunc, keys in grouped: + for key in keys: + emit_var(key, o, d, all and not isfunc) and o.write('\n') + +def exported_keys(d): + return (key for key in d.keys() if not key.startswith('__') and + d.getVarFlag(key, 'export') and + not d.getVarFlag(key, 'unexport')) + +def exported_vars(d): + for key in exported_keys(d): + try: + value = d.getVar(key, True) + except Exception: + pass + + if value is not None: + yield key, str(value) + +def emit_func(func, o=sys.__stdout__, d = init()): + """Emits all items in the data store in a format such that it can be sourced by a shell.""" + + keys = (key for key in d.keys() if not key.startswith("__") and not d.getVarFlag(key, "func")) + for key in keys: + emit_var(key, o, d, False) and o.write('\n') + + emit_var(func, o, d, False) and o.write('\n') + newdeps = bb.codeparser.ShellParser(func, logger).parse_shell(d.getVar(func, True)) + newdeps |= set((d.getVarFlag(func, "vardeps", True) or "").split()) + seen = set() + while newdeps: + deps = newdeps + seen |= deps + newdeps = set() + for dep in deps: + if d.getVarFlag(dep, "func") and not d.getVarFlag(dep, "python"): + emit_var(dep, o, d, False) and o.write('\n') + newdeps |= bb.codeparser.ShellParser(dep, logger).parse_shell(d.getVar(dep, True)) + newdeps |= set((d.getVarFlag(dep, "vardeps", True) or "").split()) + newdeps -= seen + +def update_data(d): + """Performs final steps upon the datastore, including application of overrides""" + d.finalize(parent = True) + +def build_dependencies(key, keys, shelldeps, varflagsexcl, d): + deps = set() + try: + if key[-1] == ']': + vf = key[:-1].split('[') + value = d.getVarFlag(vf[0], vf[1], False) + parser = d.expandWithRefs(value, key) + deps |= parser.references + deps = deps | (keys & parser.execs) + return deps, value + varflags = d.getVarFlags(key, ["vardeps", "vardepvalue", 
"vardepsexclude", "vardepvalueexclude", "postfuncs", "prefuncs"]) or {} + vardeps = varflags.get("vardeps") + value = d.getVar(key, False) + + def handle_contains(value, contains, d): + newvalue = "" + for k in sorted(contains): + l = (d.getVar(k, True) or "").split() + for word in sorted(contains[k]): + if word in l: + newvalue += "\n%s{%s} = Set" % (k, word) + else: + newvalue += "\n%s{%s} = Unset" % (k, word) + if not newvalue: + return value + if not value: + return newvalue + return value + newvalue + + if "vardepvalue" in varflags: + value = varflags.get("vardepvalue") + elif varflags.get("func"): + if varflags.get("python"): + parsedvar = d.expandWithRefs(value, key) + parser = bb.codeparser.PythonParser(key, logger) + if parsedvar.value and "\t" in parsedvar.value: + logger.warn("Variable %s contains tabs, please remove these (%s)" % (key, d.getVar("FILE", True))) + parser.parse_python(parsedvar.value) + deps = deps | parser.references + value = handle_contains(value, parser.contains, d) + else: + parsedvar = d.expandWithRefs(value, key) + parser = bb.codeparser.ShellParser(key, logger) + parser.parse_shell(parsedvar.value) + deps = deps | shelldeps + if vardeps is None: + parser.log.flush() + if "prefuncs" in varflags: + deps = deps | set(varflags["prefuncs"].split()) + if "postfuncs" in varflags: + deps = deps | set(varflags["postfuncs"].split()) + deps = deps | parsedvar.references + deps = deps | (keys & parser.execs) | (keys & parsedvar.execs) + value = handle_contains(value, parsedvar.contains, d) + else: + parser = d.expandWithRefs(value, key) + deps |= parser.references + deps = deps | (keys & parser.execs) + value = handle_contains(value, parser.contains, d) + + if "vardepvalueexclude" in varflags: + exclude = varflags.get("vardepvalueexclude") + for excl in exclude.split('|'): + if excl: + value = value.replace(excl, '') + + # Add varflags, assuming an exclusion list is set + if varflagsexcl: + varfdeps = [] + for f in varflags: + if f not in 
varflagsexcl: + varfdeps.append('%s[%s]' % (key, f)) + if varfdeps: + deps |= set(varfdeps) + + deps |= set((vardeps or "").split()) + deps -= set(varflags.get("vardepsexclude", "").split()) + except Exception as e: + raise bb.data_smart.ExpansionError(key, None, e) + return deps, value + #bb.note("Variable %s references %s and calls %s" % (key, str(deps), str(execs))) + #d.setVarFlag(key, "vardeps", deps) + +def generate_dependencies(d): + + keys = set(key for key in d if not key.startswith("__")) + shelldeps = set(key for key in d.getVar("__exportlist", False) if d.getVarFlag(key, "export") and not d.getVarFlag(key, "unexport")) + varflagsexcl = d.getVar('BB_SIGNATURE_EXCLUDE_FLAGS', True) + + deps = {} + values = {} + + tasklist = d.getVar('__BBTASKS') or [] + for task in tasklist: + deps[task], values[task] = build_dependencies(task, keys, shelldeps, varflagsexcl, d) + newdeps = deps[task] + seen = set() + while newdeps: + nextdeps = newdeps + seen |= nextdeps + newdeps = set() + for dep in nextdeps: + if dep not in deps: + deps[dep], values[dep] = build_dependencies(dep, keys, shelldeps, varflagsexcl, d) + newdeps |= deps[dep] + newdeps -= seen + #print "For %s: %s" % (task, str(deps[task])) + return tasklist, deps, values + +def inherits_class(klass, d): + val = getVar('__inherit_cache', d) or [] + needle = os.path.join('classes', '%s.bbclass' % klass) + for v in val: + if v.endswith(needle): + return True + return False diff --git a/bitbake/lib/bb/data_smart.py b/bitbake/lib/bb/data_smart.py new file mode 100644 index 0000000000..e4bdb2fdd9 --- /dev/null +++ b/bitbake/lib/bb/data_smart.py @@ -0,0 +1,804 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +""" +BitBake Smart Dictionary Implementation + +Functions for interacting with the data structure used by the +BitBake build tools. 
+ +""" + +# Copyright (C) 2003, 2004 Chris Larson +# Copyright (C) 2004, 2005 Seb Frankengul +# Copyright (C) 2005, 2006 Holger Hans Peter Freyther +# Copyright (C) 2005 Uli Luckas +# Copyright (C) 2005 ROAD GmbH +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# Based on functions from the base bb module, Copyright 2003 Holger Schurig + +import copy, re, sys, traceback +from collections import MutableMapping +import logging +import hashlib +import bb, bb.codeparser +from bb import utils +from bb.COW import COWDictBase + +logger = logging.getLogger("BitBake.Data") + +__setvar_keyword__ = ["_append", "_prepend", "_remove"] +__setvar_regexp__ = re.compile('(?P.*?)(?P_append|_prepend|_remove)(_(?P.*))?$') +__expand_var_regexp__ = re.compile(r"\${[^{}@\n\t ]+}") +__expand_python_regexp__ = re.compile(r"\${@.+?}") + +def infer_caller_details(loginfo, parent = False, varval = True): + """Save the caller the trouble of specifying everything.""" + # Save effort. + if 'ignore' in loginfo and loginfo['ignore']: + return + # If nothing was provided, mark this as possibly unneeded. + if not loginfo: + loginfo['ignore'] = True + return + # Infer caller's likely values for variable (var) and value (value), + # to reduce clutter in the rest of the code. 
+ if varval and ('variable' not in loginfo or 'detail' not in loginfo): + try: + raise Exception + except Exception: + tb = sys.exc_info()[2] + if parent: + above = tb.tb_frame.f_back.f_back + else: + above = tb.tb_frame.f_back + lcls = above.f_locals.items() + for k, v in lcls: + if k == 'value' and 'detail' not in loginfo: + loginfo['detail'] = v + if k == 'var' and 'variable' not in loginfo: + loginfo['variable'] = v + # Infer file/line/function from traceback + if 'file' not in loginfo: + depth = 3 + if parent: + depth = 4 + file, line, func, text = traceback.extract_stack(limit = depth)[0] + loginfo['file'] = file + loginfo['line'] = line + if func not in loginfo: + loginfo['func'] = func + +class VariableParse: + def __init__(self, varname, d, val = None): + self.varname = varname + self.d = d + self.value = val + + self.references = set() + self.execs = set() + self.contains = {} + + def var_sub(self, match): + key = match.group()[2:-1] + if self.varname and key: + if self.varname == key: + raise Exception("variable %s references itself!" 
% self.varname) + if key in self.d.expand_cache: + varparse = self.d.expand_cache[key] + var = varparse.value + else: + var = self.d.getVarFlag(key, "_content", True) + self.references.add(key) + if var is not None: + return var + else: + return match.group() + + def python_sub(self, match): + code = match.group()[3:-1] + codeobj = compile(code.strip(), self.varname or "", "eval") + + parser = bb.codeparser.PythonParser(self.varname, logger) + parser.parse_python(code) + if self.varname: + vardeps = self.d.getVarFlag(self.varname, "vardeps", True) + if vardeps is None: + parser.log.flush() + else: + parser.log.flush() + self.references |= parser.references + self.execs |= parser.execs + + for k in parser.contains: + if k not in self.contains: + self.contains[k] = parser.contains[k].copy() + else: + self.contains[k].update(parser.contains[k]) + value = utils.better_eval(codeobj, DataContext(self.d)) + return str(value) + + +class DataContext(dict): + def __init__(self, metadata, **kwargs): + self.metadata = metadata + dict.__init__(self, **kwargs) + self['d'] = metadata + + def __missing__(self, key): + value = self.metadata.getVar(key, True) + if value is None or self.metadata.getVarFlag(key, 'func'): + raise KeyError(key) + else: + return value + +class ExpansionError(Exception): + def __init__(self, varname, expression, exception): + self.expression = expression + self.variablename = varname + self.exception = exception + if varname: + if expression: + self.msg = "Failure expanding variable %s, expression was %s which triggered exception %s: %s" % (varname, expression, type(exception).__name__, exception) + else: + self.msg = "Failure expanding variable %s: %s: %s" % (varname, type(exception).__name__, exception) + else: + self.msg = "Failure expanding expression %s which triggered exception %s: %s" % (expression, type(exception).__name__, exception) + Exception.__init__(self, self.msg) + self.args = (varname, expression, exception) + def __str__(self): + return 
class IncludeHistory(object):
    """Tree recording which configuration files were included from which,
    for later reporting via emit()."""

    def __init__(self, parent = None, filename = '[TOP LEVEL]'):
        self.parent = parent
        self.filename = filename
        self.children = []
        # Node currently being parsed; include() descends, __exit__ ascends.
        self.current = self

    def copy(self):
        """Return a copy sharing the child entry objects."""
        new = IncludeHistory(self.parent, self.filename)
        for c in self.children:
            new.children.append(c)
        return new

    def include(self, filename):
        """Record that filename is included from the current file and
        descend into it. Returns self (usable as a context manager)."""
        newfile = IncludeHistory(self.current, filename)
        self.current.children.append(newfile)
        self.current = newfile
        return self

    def __enter__(self):
        pass

    def __exit__(self, a, b, c):
        if self.current.parent:
            self.current = self.current.parent
        else:
            # Fix: this previously referenced an undefined name 'filename',
            # raising NameError instead of warning.
            bb.warn("Include log: Tried to finish '%s' at top level." % self.filename)
        return False

    def emit(self, o, level = 0):
        """Emit an include history file, and its children."""
        if level:
            spaces = " " * (level - 1)
            o.write("# %s%s" % (spaces, self.filename))
            if len(self.children) > 0:
                o.write(" includes:")
        else:
            o.write("#\n# INCLUDE HISTORY:\n#")
        level = level + 1
        for child in self.children:
            o.write("\n")
            child.emit(o, level)
def del_var_history(self, var, f=None, line=None):
    """Drop recorded history for variable var.

    If file f and line are not given, the entire history of var is
    deleted; otherwise only the events recorded at that line of that
    file are removed.
    """
    if var in self.variables:
        if f and line:
            # Keep an event unless it matches BOTH the file and the line.
            # The original used 'and' here, which dropped every event that
            # matched either criterion — far more history than requested.
            self.variables[var] = [x for x in self.variables[var]
                                   if x['file'] != f or x['line'] != line]
        else:
            self.variables[var] = []
def finalize(self, parent = False):
    """Performs final steps upon the datastore, including application of overrides.

    First every override listed in OVERRIDES (colon-separated) replaces the
    base variable from its VAR_override sibling; then pending _append and
    _prepend operations are applied, and _remove operations are stashed in
    the "_removeactive" flag for getVarFlag to apply on read.
    """

    overrides = (self.getVar("OVERRIDES", True) or "").split(":") or []
    finalize_caller = {
        'op': 'finalize',
    }
    infer_caller_details(finalize_caller, parent = parent, varval = False)

    #
    # Well let us see what breaks here. We used to iterate
    # over each variable and apply the override and then
    # do the line expanding.
    # If we have bad luck - which we will have - the keys
    # where in some order that is so important for this
    # method which we don't have anymore.
    # Anyway we will fix that and write test cases this
    # time.

    #
    # First we apply all overrides
    # Then we will handle _append and _prepend and store the _remove
    # information for later.
    #

    # We only want to report finalization once per variable overridden.
    finalizes_reported = {}

    for o in overrides:
        # calculate '_'+override
        l = len(o) + 1

        # see if one should even try
        if o not in self._seen_overrides:
            continue

        # NOTE: 'vars' shadows the builtin of the same name; copy() so the
        # set can be mutated (via delVar below) while we iterate.
        vars = self._seen_overrides[o].copy()
        for var in vars:
            # Strip the '_override' suffix to get the base variable name.
            name = var[:-l]
            try:
                # Report only once, even if multiple changes.
                if name not in finalizes_reported:
                    finalizes_reported[name] = True
                    finalize_caller['variable'] = name
                    finalize_caller['detail'] = 'was: ' + str(self.getVar(name, False))
                    self.varhistory.record(**finalize_caller)
                # Copy history of the override over.
                for event in self.varhistory.variable(var):
                    loginfo = event.copy()
                    loginfo['variable'] = name
                    loginfo['op'] = 'override[%s]:%s' % (o, loginfo['op'])
                    self.varhistory.record(**loginfo)
                self.setVar(name, self.getVar(var, False), op = 'finalize', file = 'override[%s]' % o, line = '')
                self.delVar(var)
            except Exception:
                # Best-effort: a failed override application is logged and
                # skipped rather than aborting finalization.
                logger.info("Untracked delVar")

    # now on to the appends and prepends, and stashing the removes
    for op in __setvar_keyword__:
        if op in self._special_values:
            appends = self._special_values[op] or []
            for append in appends:
                # Entries whose override suffix is not active are kept for
                # a possible later finalize pass.
                keep = []
                for (a, o) in self.getVarFlag(append, op) or []:
                    match = True
                    if o:
                        # The suffix may chain several overrides, e.g.
                        # '_append_arm_linux': all parts must be active.
                        for o2 in o.split("_"):
                            if not o2 in overrides:
                                match = False
                    if not match:
                        keep.append((a ,o))
                        continue

                    if op == "_append":
                        sval = self.getVar(append, False) or ""
                        sval += a
                        self.setVar(append, sval)
                    elif op == "_prepend":
                        sval = a + (self.getVar(append, False) or "")
                        self.setVar(append, sval)
                    elif op == "_remove":
                        # Removal is deferred: getVarFlag filters these
                        # tokens out of "_content" on read.
                        removes = self.getVarFlag(append, "_removeactive", False) or []
                        removes.extend(a.split())
                        self.setVarFlag(append, "_removeactive", removes, ignore=True)

                # We save overrides that may be applied at some later stage
                if keep:
                    self.setVarFlag(append, op, keep, ignore=True)
                else:
                    self.delVarFlag(append, op, ignore=True)
def _findVar(self, var):
    """Walk the copy-on-write chain of dicts looking for var, returning
    its per-variable dict from the nearest scope, or None."""
    scope = self.dict
    while scope:
        if var in scope:
            return scope[var]
        # "_data" links to the parent datastore's dict; stop at the root.
        if "_data" not in scope:
            break
        scope = scope["_data"]
    return None
""" + Rename the variable key to newkey + """ + val = self.getVar(key, 0) + if val is not None: + loginfo['variable'] = newkey + loginfo['op'] = 'rename from %s' % key + loginfo['detail'] = val + self.varhistory.record(**loginfo) + self.setVar(newkey, val, ignore=True) + + for i in (__setvar_keyword__): + src = self.getVarFlag(key, i) + if src is None: + continue + + dest = self.getVarFlag(newkey, i) or [] + dest.extend(src) + self.setVarFlag(newkey, i, dest, ignore=True) + + if i in self._special_values and key in self._special_values[i]: + self._special_values[i].remove(key) + self._special_values[i].add(newkey) + + loginfo['variable'] = key + loginfo['op'] = 'rename (to)' + loginfo['detail'] = newkey + self.varhistory.record(**loginfo) + self.delVar(key, ignore=True) + + def appendVar(self, var, value, **loginfo): + loginfo['op'] = 'append' + self.varhistory.record(**loginfo) + newvalue = (self.getVar(var, False) or "") + value + self.setVar(var, newvalue, ignore=True) + + def prependVar(self, var, value, **loginfo): + loginfo['op'] = 'prepend' + self.varhistory.record(**loginfo) + newvalue = value + (self.getVar(var, False) or "") + self.setVar(var, newvalue, ignore=True) + + def delVar(self, var, **loginfo): + loginfo['detail'] = "" + loginfo['op'] = 'del' + self.varhistory.record(**loginfo) + self.expand_cache = {} + self.dict[var] = {} + if '_' in var: + override = var[var.rfind('_')+1:] + if override and override in self._seen_overrides and var in self._seen_overrides[override]: + self._seen_overrides[override].remove(var) + + def setVarFlag(self, var, flag, value, **loginfo): + if 'op' not in loginfo: + loginfo['op'] = "set" + loginfo['flag'] = flag + self.varhistory.record(**loginfo) + if not var in self.dict: + self._makeShadowCopy(var) + self.dict[var][flag] = value + + if flag == "defaultval" and '_' in var: + self._setvar_update_overrides(var) + + if flag == "unexport" or flag == "export": + if not "__exportlist" in self.dict: + 
def getVarFlag(self, var, flag, expand=False, noweakdefault=False):
    """Return the value of flag on variable var, or None.

    For the "_content" flag (the variable's value itself) a weak default
    stored in the "defaultval" flag is used as a fallback unless
    noweakdefault is set. When expand is true the value is expanded, and
    any pending _remove tokens ("_removeactive") are filtered out of
    "_content".
    """
    local_var = self._findVar(var)
    value = None
    if local_var is not None:
        if flag in local_var:
            # copy so callers cannot mutate the stored value in place.
            value = copy.copy(local_var[flag])
        elif flag == "_content" and "defaultval" in local_var and not noweakdefault:
            value = copy.copy(local_var["defaultval"])
    if expand and value:
        # Only getvar (flag == _content) hits the expand cache
        cachename = None
        if flag == "_content":
            cachename = var
        else:
            cachename = var + "[" + flag + "]"
        value = self.expand(value, cachename)
    if value is not None and flag == "_content" and local_var is not None and "_removeactive" in local_var:
        # Apply deferred _remove operations: drop any space-separated
        # token listed in "_removeactive".
        filtered = filter(lambda v: v not in local_var["_removeactive"],
                          value.split(" "))
        value = " ".join(filtered)
        if expand:
            # We need to ensure the expand cache has the correct value
            # flag == "_content" here
            # NOTE(review): relies on self.expand() above having cached a
            # varparse under 'var' — confirm expandWithRefs always does.
            self.expand_cache[var].value = value
    return value
def getVarFlags(self, var, expand = False, internalflags=False):
    """Return a dict of the flags set on var, or None when there are none.

    'expand' doubles as a collection of flag names whose values should
    be expanded. Flags with a leading underscore are internal and only
    returned when internalflags is set.
    """
    found = self._findVar(var)
    result = {}
    if found:
        for flagname in found:
            # Hide internal bookkeeping flags unless asked for.
            if not internalflags and flagname.startswith("_"):
                continue
            flagvalue = found[flagname]
            if expand and flagname in expand:
                flagvalue = self.expand(flagvalue, "%s[%s]" % (var, flagname))
            result[flagname] = flagvalue
    return result or None
def localkeys(self):
    """Yield the variable names set directly on this datastore,
    excluding the '_data' link to the parent datastore."""
    for name in self.dict.keys():
        if name == '_data':
            continue
        yield name
key in ["__BBTASKS", "__BBANONFUNCS", "__BBHANDLERS"]: + bb_list = d.getVar(key, False) or [] + bb_list.sort() + data.update({key:str(bb_list)}) + + if key == "__BBANONFUNCS": + for i in bb_list: + value = d.getVar(i, True) or "" + data.update({i:value}) + + data_str = str([(k, data[k]) for k in sorted(data.keys())]) + return hashlib.md5(data_str).hexdigest() diff --git a/bitbake/lib/bb/event.py b/bitbake/lib/bb/event.py new file mode 100644 index 0000000000..e2050431ec --- /dev/null +++ b/bitbake/lib/bb/event.py @@ -0,0 +1,641 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +""" +BitBake 'Event' implementation + +Classes and functions for manipulating 'events' in the +BitBake build tools. +""" + +# Copyright (C) 2003, 2004 Chris Larson +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +import os, sys +import warnings +try: + import cPickle as pickle +except ImportError: + import pickle +import logging +import atexit +import traceback +import bb.utils +import bb.compat +import bb.exceptions + +# This is the pid for which we should generate the event. This is set when +# the runqueue forks off. 
def set_class_handlers(h):
    """Replace the module-level table of class event handlers."""
    # Fix: without the global declaration the assignment only created a
    # local binding, so this function silently did nothing.
    global _handlers
    _handlers = h
+ # Nope, so just print all of the messages we have (including debug messages) + for event in ui_queue: + if isinstance(event, logging.LogRecord): + logger.handle(event) + +def fire_ui_handlers(event, d): + if not _ui_handlers: + # No UI handlers registered yet, queue up the messages + ui_queue.append(event) + return + + errors = [] + for h in _ui_handlers: + #print "Sending event %s" % event + try: + if not _ui_logfilters[h].filter(event): + continue + # We use pickle here since it better handles object instances + # which xmlrpc's marshaller does not. Events *must* be serializable + # by pickle. + if hasattr(_ui_handlers[h].event, "sendpickle"): + _ui_handlers[h].event.sendpickle((pickle.dumps(event))) + else: + _ui_handlers[h].event.send(event) + except: + errors.append(h) + for h in errors: + del _ui_handlers[h] + +def fire(event, d): + """Fire off an Event""" + + # We can fire class handlers in the worker process context and this is + # desired so they get the task based datastore. + # UI handlers need to be fired in the server context so we defer this. They + # don't have a datastore so the datastore context isn't a problem. 
def register(name, handler, mask=[]):
    """Register an Event handler.

    handler may be a callable or a string of python code (which is
    compiled into a function taking the event as 'e'). mask is a list of
    event class names the handler wants; empty or containing '*' means
    all events. Returns AlreadyRegistered if name is taken, Registered
    on success, or None when a string handler fails to compile.
    NOTE: the mutable default mask=[] is never mutated here, so it is safe.
    """

    # already registered
    if name in _handlers:
        return AlreadyRegistered

    if handler is not None:
        # handle string containing python code
        if isinstance(handler, basestring):
            # Wrap the code in a function named after the handler.
            tmp = "def %s(e):\n%s" % (name, handler)
            try:
                code = compile(tmp, "%s(e)" % name, "exec")
            except SyntaxError:
                logger.error("Unable to register event handler '%s':\n%s", name,
                             ''.join(traceback.format_exc(limit=0)))
                # Install a no-op so later fires don't KeyError; note this
                # path returns None, not Registered.
                _handlers[name] = noop
                return
            env = {}
            bb.utils.better_exec(code, env)
            func = bb.utils.better_eval(name, env)
            _handlers[name] = func
        else:
            _handlers[name] = handler

        if not mask or '*' in mask:
            # No (or wildcard) mask: handler receives every event.
            _catchall_handlers[name] = True
        else:
            for m in mask:
                if _event_handler_map.get(m, None) is None:
                    _event_handler_map[m] = {}
                _event_handler_map[m][name] = True

        return Registered
def getName(e):
    """Return the name of a class, or the class name of an instance."""
    name = getattr(e, "__name__", None)
    # 'is None' rather than '== None': identity is the correct test for
    # the None singleton; also avoids a second attribute lookup.
    if name is None:
        return e.__class__.__name__
    return name
getTargets(self): + return self._targets + + stampPrefix = property(getStampPrefix) + targets = property(getTargets) + +class BuildBase(Event): + """Base class for bbmake run events""" + + def __init__(self, n, p, failures = 0): + self._name = n + self._pkgs = p + Event.__init__(self) + self._failures = failures + + def getPkgs(self): + return self._pkgs + + def setPkgs(self, pkgs): + self._pkgs = pkgs + + def getName(self): + return self._name + + def setName(self, name): + self._name = name + + def getCfg(self): + return self.data + + def setCfg(self, cfg): + self.data = cfg + + def getFailures(self): + """ + Return the number of failed packages + """ + return self._failures + + pkgs = property(getPkgs, setPkgs, None, "pkgs property") + name = property(getName, setName, None, "name property") + cfg = property(getCfg, setCfg, None, "cfg property") + + + + + +class BuildStarted(BuildBase, OperationStarted): + """bbmake build run started""" + def __init__(self, n, p, failures = 0): + OperationStarted.__init__(self, "Building Started") + BuildBase.__init__(self, n, p, failures) + +class BuildCompleted(BuildBase, OperationCompleted): + """bbmake build run completed""" + def __init__(self, total, n, p, failures = 0): + if not failures: + OperationCompleted.__init__(self, total, "Building Succeeded") + else: + OperationCompleted.__init__(self, total, "Building Failed") + BuildBase.__init__(self, n, p, failures) + +class DiskFull(Event): + """Disk full case build aborted""" + def __init__(self, dev, type, freespace, mountpoint): + Event.__init__(self) + self._dev = dev + self._type = type + self._free = freespace + self._mountpoint = mountpoint + +class NoProvider(Event): + """No Provider for an Event""" + + def __init__(self, item, runtime=False, dependees=None, reasons=[], close_matches=[]): + Event.__init__(self) + self._item = item + self._runtime = runtime + self._dependees = dependees + self._reasons = reasons + self._close_matches = close_matches + + def 
getItem(self): + return self._item + + def isRuntime(self): + return self._runtime + +class MultipleProviders(Event): + """Multiple Providers""" + + def __init__(self, item, candidates, runtime = False): + Event.__init__(self) + self._item = item + self._candidates = candidates + self._is_runtime = runtime + + def isRuntime(self): + """ + Is this a runtime issue? + """ + return self._is_runtime + + def getItem(self): + """ + The name for the to be build item + """ + return self._item + + def getCandidates(self): + """ + Get the possible Candidates for a PROVIDER. + """ + return self._candidates + +class ParseStarted(OperationStarted): + """Recipe parsing for the runqueue has begun""" + def __init__(self, total): + OperationStarted.__init__(self, "Recipe parsing Started") + self.total = total + +class ParseCompleted(OperationCompleted): + """Recipe parsing for the runqueue has completed""" + def __init__(self, cached, parsed, skipped, masked, virtuals, errors, total): + OperationCompleted.__init__(self, total, "Recipe parsing Completed") + self.cached = cached + self.parsed = parsed + self.skipped = skipped + self.virtuals = virtuals + self.masked = masked + self.errors = errors + self.sofar = cached + parsed + +class ParseProgress(OperationProgress): + """Recipe parsing progress""" + def __init__(self, current, total): + OperationProgress.__init__(self, current, total, "Recipe parsing") + + +class CacheLoadStarted(OperationStarted): + """Loading of the dependency cache has begun""" + def __init__(self, total): + OperationStarted.__init__(self, "Loading cache Started") + self.total = total + +class CacheLoadProgress(OperationProgress): + """Cache loading progress""" + def __init__(self, current, total): + OperationProgress.__init__(self, current, total, "Loading cache") + +class CacheLoadCompleted(OperationCompleted): + """Cache loading is complete""" + def __init__(self, total, num_entries): + OperationCompleted.__init__(self, total, "Loading cache Completed") + 
self.num_entries = num_entries + +class TreeDataPreparationStarted(OperationStarted): + """Tree data preparation started""" + def __init__(self): + OperationStarted.__init__(self, "Preparing tree data Started") + +class TreeDataPreparationProgress(OperationProgress): + """Tree data preparation is in progress""" + def __init__(self, current, total): + OperationProgress.__init__(self, current, total, "Preparing tree data") + +class TreeDataPreparationCompleted(OperationCompleted): + """Tree data preparation completed""" + def __init__(self, total): + OperationCompleted.__init__(self, total, "Preparing tree data Completed") + +class DepTreeGenerated(Event): + """ + Event when a dependency tree has been generated + """ + + def __init__(self, depgraph): + Event.__init__(self) + self._depgraph = depgraph + +class TargetsTreeGenerated(Event): + """ + Event when a set of buildable targets has been generated + """ + def __init__(self, model): + Event.__init__(self) + self._model = model + +class FilesMatchingFound(Event): + """ + Event when a list of files matching the supplied pattern has + been generated + """ + def __init__(self, pattern, matches): + Event.__init__(self) + self._pattern = pattern + self._matches = matches + +class CoreBaseFilesFound(Event): + """ + Event when a list of appropriate config files has been generated + """ + def __init__(self, paths): + Event.__init__(self) + self._paths = paths + +class ConfigFilesFound(Event): + """ + Event when a list of appropriate config files has been generated + """ + def __init__(self, variable, values): + Event.__init__(self) + self._variable = variable + self._values = values + +class ConfigFilePathFound(Event): + """ + Event when a path for a config file has been found + """ + def __init__(self, path): + Event.__init__(self) + self._path = path + +class MsgBase(Event): + """Base class for messages""" + + def __init__(self, msg): + self._message = msg + Event.__init__(self) + +class MsgDebug(MsgBase): + """Debug 
Message""" + +class MsgNote(MsgBase): + """Note Message""" + +class MsgWarn(MsgBase): + """Warning Message""" + +class MsgError(MsgBase): + """Error Message""" + +class MsgFatal(MsgBase): + """Fatal Message""" + +class MsgPlain(MsgBase): + """General output""" + +class LogExecTTY(Event): + """Send event containing program to spawn on tty of the logger""" + def __init__(self, msg, prog, sleep_delay, retries): + Event.__init__(self) + self.msg = msg + self.prog = prog + self.sleep_delay = sleep_delay + self.retries = retries + +class LogHandler(logging.Handler): + """Dispatch logging messages as bitbake events""" + + def emit(self, record): + if record.exc_info: + etype, value, tb = record.exc_info + if hasattr(tb, 'tb_next'): + tb = list(bb.exceptions.extract_traceback(tb, context=3)) + record.bb_exc_info = (etype, value, tb) + record.exc_info = None + fire(record, None) + + def filter(self, record): + record.taskpid = worker_pid + return True + +class RequestPackageInfo(Event): + """ + Event to request package information + """ + +class PackageInfo(Event): + """ + Package information for GUI + """ + def __init__(self, pkginfolist): + Event.__init__(self) + self._pkginfolist = pkginfolist + +class MetadataEvent(Event): + """ + Generic event that target for OE-Core classes + to report information during asynchrous execution + """ + def __init__(self, eventtype, eventdata): + Event.__init__(self) + self.type = eventtype + self.data = eventdata + +class SanityCheck(Event): + """ + Event to runs sanity checks, either raise errors or generate events as return status. 
+ """ + def __init__(self, generateevents = True): + Event.__init__(self) + self.generateevents = generateevents + +class SanityCheckPassed(Event): + """ + Event to indicate sanity check is passed + """ + +class SanityCheckFailed(Event): + """ + Event to indicate sanity check has failed + """ + def __init__(self, msg, network_error=False): + Event.__init__(self) + self._msg = msg + self._network_error = network_error + +class NetworkTest(Event): + """ + Event to run network connectivity tests, either raise errors or generate events as return status. + """ + def __init__(self, generateevents = True): + Event.__init__(self) + self.generateevents = generateevents + +class NetworkTestPassed(Event): + """ + Event to indicate network test has passed + """ + +class NetworkTestFailed(Event): + """ + Event to indicate network test has failed + """ + diff --git a/bitbake/lib/bb/exceptions.py b/bitbake/lib/bb/exceptions.py new file mode 100644 index 0000000000..f182c8fd62 --- /dev/null +++ b/bitbake/lib/bb/exceptions.py @@ -0,0 +1,91 @@ +from __future__ import absolute_import +import inspect +import traceback +import bb.namedtuple_with_abc +from collections import namedtuple + + +class TracebackEntry(namedtuple.abc): + """Pickleable representation of a traceback entry""" + _fields = 'filename lineno function args code_context index' + _header = ' File "{0.filename}", line {0.lineno}, in {0.function}{0.args}' + + def format(self, formatter=None): + if not self.code_context: + return self._header.format(self) + '\n' + + formatted = [self._header.format(self) + ':\n'] + + for lineindex, line in enumerate(self.code_context): + if formatter: + line = formatter(line) + + if lineindex == self.index: + formatted.append(' >%s' % line) + else: + formatted.append(' %s' % line) + return formatted + + def __str__(self): + return ''.join(self.format()) + +def _get_frame_args(frame): + """Get the formatted arguments and class (if available) for a frame""" + arginfo = 
inspect.getargvalues(frame) + + try: + if not arginfo.args: + return '', None + # There have been reports from the field of python 2.6 which doesn't + # return a namedtuple here but simply a tuple so fallback gracefully if + # args isn't present. + except AttributeError: + return '', None + + firstarg = arginfo.args[0] + if firstarg == 'self': + self = arginfo.locals['self'] + cls = self.__class__.__name__ + + arginfo.args.pop(0) + del arginfo.locals['self'] + else: + cls = None + + formatted = inspect.formatargvalues(*arginfo) + return formatted, cls + +def extract_traceback(tb, context=1): + frames = inspect.getinnerframes(tb, context) + for frame, filename, lineno, function, code_context, index in frames: + formatted_args, cls = _get_frame_args(frame) + if cls: + function = '%s.%s' % (cls, function) + yield TracebackEntry(filename, lineno, function, formatted_args, + code_context, index) + +def format_extracted(extracted, formatter=None, limit=None): + if limit: + extracted = extracted[-limit:] + + formatted = [] + for tracebackinfo in extracted: + formatted.extend(tracebackinfo.format(formatter)) + return formatted + + +def format_exception(etype, value, tb, context=1, limit=None, formatter=None): + formatted = ['Traceback (most recent call last):\n'] + + if hasattr(tb, 'tb_next'): + tb = extract_traceback(tb, context) + + formatted.extend(format_extracted(tb, formatter, limit)) + formatted.extend(traceback.format_exception_only(etype, value)) + return formatted + +def to_string(exc): + if isinstance(exc, SystemExit): + if not isinstance(exc.code, basestring): + return 'Exited with "%d"' % exc.code + return str(exc) diff --git a/bitbake/lib/bb/fetch2/__init__.py b/bitbake/lib/bb/fetch2/__init__.py new file mode 100644 index 0000000000..5a03a0e46e --- /dev/null +++ b/bitbake/lib/bb/fetch2/__init__.py @@ -0,0 +1,1575 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +""" +BitBake 'Fetch' implementations + +Classes for 
obtaining upstream sources for the +BitBake build tools. +""" + +# Copyright (C) 2003, 2004 Chris Larson +# Copyright (C) 2012 Intel Corporation +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Based on functions from the base bb module, Copyright 2003 Holger Schurig + +from __future__ import absolute_import +from __future__ import print_function +import os, re +import signal +import glob +import logging +import urllib +import urlparse +import operator +import bb.persist_data, bb.utils +import bb.checksum +from bb import data +import bb.process +import subprocess + +__version__ = "2" +_checksum_cache = bb.checksum.FileChecksumCache() + +logger = logging.getLogger("BitBake.Fetcher") + +class BBFetchException(Exception): + """Class all fetch exceptions inherit from""" + def __init__(self, message): + self.msg = message + Exception.__init__(self, message) + + def __str__(self): + return self.msg + +class MalformedUrl(BBFetchException): + """Exception raised when encountering an invalid url""" + def __init__(self, url): + msg = "The URL: '%s' is invalid and cannot be interpreted" % url + self.url = url + BBFetchException.__init__(self, msg) + self.args = (url,) + +class FetchError(BBFetchException): + """General fetcher exception when something happens incorrectly""" + def __init__(self, message, url = None): + if url: + msg = "Fetcher failure for URL: '%s'. 
%s" % (url, message) + else: + msg = "Fetcher failure: %s" % message + self.url = url + BBFetchException.__init__(self, msg) + self.args = (message, url) + +class ChecksumError(FetchError): + """Exception when mismatched checksum encountered""" + def __init__(self, message, url = None, checksum = None): + self.checksum = checksum + FetchError.__init__(self, message, url) + +class NoChecksumError(FetchError): + """Exception when no checksum is specified, but BB_STRICT_CHECKSUM is set""" + +class UnpackError(BBFetchException): + """General fetcher exception when something happens incorrectly when unpacking""" + def __init__(self, message, url): + msg = "Unpack failure for URL: '%s'. %s" % (url, message) + self.url = url + BBFetchException.__init__(self, msg) + self.args = (message, url) + +class NoMethodError(BBFetchException): + """Exception raised when there is no method to obtain a supplied url or set of urls""" + def __init__(self, url): + msg = "Could not find a fetcher which supports the URL: '%s'" % url + self.url = url + BBFetchException.__init__(self, msg) + self.args = (url,) + +class MissingParameterError(BBFetchException): + """Exception raised when a fetch method is missing a critical parameter in the url""" + def __init__(self, missing, url): + msg = "URL: '%s' is missing the required parameter '%s'" % (url, missing) + self.url = url + self.missing = missing + BBFetchException.__init__(self, msg) + self.args = (missing, url) + +class ParameterError(BBFetchException): + """Exception raised when a url cannot be proccessed due to invalid parameters.""" + def __init__(self, message, url): + msg = "URL: '%s' has invalid parameters. 
%s" % (url, message) + self.url = url + BBFetchException.__init__(self, msg) + self.args = (message, url) + +class NetworkAccess(BBFetchException): + """Exception raised when network access is disabled but it is required.""" + def __init__(self, url, cmd): + msg = "Network access disabled through BB_NO_NETWORK (or set indirectly due to use of BB_FETCH_PREMIRRORONLY) but access requested with command %s (for url %s)" % (cmd, url) + self.url = url + self.cmd = cmd + BBFetchException.__init__(self, msg) + self.args = (url, cmd) + +class NonLocalMethod(Exception): + def __init__(self): + Exception.__init__(self) + + +class URI(object): + """ + A class representing a generic URI, with methods for + accessing the URI components, and stringifies to the + URI. + + It is constructed by calling it with a URI, or setting + the attributes manually: + + uri = URI("http://example.com/") + + uri = URI() + uri.scheme = 'http' + uri.hostname = 'example.com' + uri.path = '/' + + It has the following attributes: + + * scheme (read/write) + * userinfo (authentication information) (read/write) + * username (read/write) + * password (read/write) + + Note, password is deprecated as of RFC 3986. + + * hostname (read/write) + * port (read/write) + * hostport (read only) + "hostname:port", if both are set, otherwise just "hostname" + * path (read/write) + * path_quoted (read/write) + A URI quoted version of path + * params (dict) (read/write) + * query (dict) (read/write) + * relative (bool) (read only) + True if this is a "relative URI", (e.g. file:foo.diff) + + It stringifies to the URI itself. + + Some notes about relative URIs: while it's specified that + a URI beginning with :// should either be directly + followed by a hostname or a /, the old URI handling of the + fetch2 library did not comform to this. Therefore, this URI + class has some kludges to make sure that URIs are parsed in + a way comforming to bitbake's current usage. 
This URI class + supports the following: + + file:relative/path.diff (IETF compliant) + git:relative/path.git (IETF compliant) + git:///absolute/path.git (IETF compliant) + file:///absolute/path.diff (IETF compliant) + + file://relative/path.diff (not IETF compliant) + + But it does not support the following: + + file://hostname/absolute/path.diff (would be IETF compliant) + + Note that the last case only applies to a list of + "whitelisted" schemes (currently only file://), that requires + its URIs to not have a network location. + """ + + _relative_schemes = ['file', 'git'] + _netloc_forbidden = ['file'] + + def __init__(self, uri=None): + self.scheme = '' + self.userinfo = '' + self.hostname = '' + self.port = None + self._path = '' + self.params = {} + self.query = {} + self.relative = False + + if not uri: + return + + # We hijack the URL parameters, since the way bitbake uses + # them are not quite RFC compliant. + uri, param_str = (uri.split(";", 1) + [None])[:2] + + urlp = urlparse.urlparse(uri) + self.scheme = urlp.scheme + + reparse = 0 + + # Coerce urlparse to make URI scheme use netloc + if not self.scheme in urlparse.uses_netloc: + urlparse.uses_params.append(self.scheme) + reparse = 1 + + # Make urlparse happy(/ier) by converting local resources + # to RFC compliant URL format. 
E.g.: + # file://foo.diff -> file:foo.diff + if urlp.scheme in self._netloc_forbidden: + uri = re.sub("(?<=:)//(?!/)", "", uri, 1) + reparse = 1 + + if reparse: + urlp = urlparse.urlparse(uri) + + # Identify if the URI is relative or not + if urlp.scheme in self._relative_schemes and \ + re.compile("^\w+:(?!//)").match(uri): + self.relative = True + + if not self.relative: + self.hostname = urlp.hostname or '' + self.port = urlp.port + + self.userinfo += urlp.username or '' + + if urlp.password: + self.userinfo += ':%s' % urlp.password + + self.path = urllib.unquote(urlp.path) + + if param_str: + self.params = self._param_str_split(param_str, ";") + if urlp.query: + self.query = self._param_str_split(urlp.query, "&") + + def __str__(self): + userinfo = self.userinfo + if userinfo: + userinfo += '@' + + return "%s:%s%s%s%s%s%s" % ( + self.scheme, + '' if self.relative else '//', + userinfo, + self.hostport, + self.path_quoted, + self._query_str(), + self._param_str()) + + def _param_str(self): + return ( + ''.join([';', self._param_str_join(self.params, ";")]) + if self.params else '') + + def _query_str(self): + return ( + ''.join(['?', self._param_str_join(self.query, "&")]) + if self.query else '') + + def _param_str_split(self, string, elmdelim, kvdelim="="): + ret = {} + for k, v in [x.split(kvdelim, 1) for x in string.split(elmdelim)]: + ret[k] = v + return ret + + def _param_str_join(self, dict_, elmdelim, kvdelim="="): + return elmdelim.join([kvdelim.join([k, v]) for k, v in dict_.items()]) + + @property + def hostport(self): + if not self.port: + return self.hostname + return "%s:%d" % (self.hostname, self.port) + + @property + def path_quoted(self): + return urllib.quote(self.path) + + @path_quoted.setter + def path_quoted(self, path): + self.path = urllib.unquote(path) + + @property + def path(self): + return self._path + + @path.setter + def path(self, path): + self._path = path + + if re.compile("^/").match(path): + self.relative = False + else: + 
self.relative = True + + @property + def username(self): + if self.userinfo: + return (self.userinfo.split(":", 1))[0] + return '' + + @username.setter + def username(self, username): + password = self.password + self.userinfo = username + if password: + self.userinfo += ":%s" % password + + @property + def password(self): + if self.userinfo and ":" in self.userinfo: + return (self.userinfo.split(":", 1))[1] + return '' + + @password.setter + def password(self, password): + self.userinfo = "%s:%s" % (self.username, password) + +def decodeurl(url): + """Decodes an URL into the tokens (scheme, network location, path, + user, password, parameters). + """ + + m = re.compile('(?P[^:]*)://((?P[^/]+)@)?(?P[^;]+)(;(?P.*))?').match(url) + if not m: + raise MalformedUrl(url) + + type = m.group('type') + location = m.group('location') + if not location: + raise MalformedUrl(url) + user = m.group('user') + parm = m.group('parm') + + locidx = location.find('/') + if locidx != -1 and type.lower() != 'file': + host = location[:locidx] + path = location[locidx:] + else: + host = "" + path = location + if user: + m = re.compile('(?P[^:]+)(:?(?P.*))').match(user) + if m: + user = m.group('user') + pswd = m.group('pswd') + else: + user = '' + pswd = '' + + p = {} + if parm: + for s in parm.split(';'): + s1, s2 = s.split('=') + p[s1] = s2 + + return type, host, urllib.unquote(path), user, pswd, p + +def encodeurl(decoded): + """Encodes a URL from tokens (scheme, network location, path, + user, password, parameters). 
+ """ + + type, host, path, user, pswd, p = decoded + + if not path: + raise MissingParameterError('path', "encoded from the data %s" % str(decoded)) + if not type: + raise MissingParameterError('type', "encoded from the data %s" % str(decoded)) + url = '%s://' % type + if user and type != "file": + url += "%s" % user + if pswd: + url += ":%s" % pswd + url += "@" + if host and type != "file": + url += "%s" % host + # Standardise path to ensure comparisons work + while '//' in path: + path = path.replace("//", "/") + url += "%s" % urllib.quote(path) + if p: + for parm in p: + url += ";%s=%s" % (parm, p[parm]) + + return url + +def uri_replace(ud, uri_find, uri_replace, replacements, d): + if not ud.url or not uri_find or not uri_replace: + logger.error("uri_replace: passed an undefined value, not replacing") + return None + uri_decoded = list(decodeurl(ud.url)) + uri_find_decoded = list(decodeurl(uri_find)) + uri_replace_decoded = list(decodeurl(uri_replace)) + logger.debug(2, "For url %s comparing %s to %s" % (uri_decoded, uri_find_decoded, uri_replace_decoded)) + result_decoded = ['', '', '', '', '', {}] + for loc, i in enumerate(uri_find_decoded): + result_decoded[loc] = uri_decoded[loc] + regexp = i + if loc == 0 and regexp and not regexp.endswith("$"): + # Leaving the type unanchored can mean "https" matching "file" can become "files" + # which is clearly undesirable. 
+ regexp += "$" + if loc == 5: + # Handle URL parameters + if i: + # Any specified URL parameters must match + for k in uri_replace_decoded[loc]: + if uri_decoded[loc][k] != uri_replace_decoded[loc][k]: + return None + # Overwrite any specified replacement parameters + for k in uri_replace_decoded[loc]: + for l in replacements: + uri_replace_decoded[loc][k] = uri_replace_decoded[loc][k].replace(l, replacements[l]) + result_decoded[loc][k] = uri_replace_decoded[loc][k] + elif (re.match(regexp, uri_decoded[loc])): + if not uri_replace_decoded[loc]: + result_decoded[loc] = "" + else: + for k in replacements: + uri_replace_decoded[loc] = uri_replace_decoded[loc].replace(k, replacements[k]) + #bb.note("%s %s %s" % (regexp, uri_replace_decoded[loc], uri_decoded[loc])) + result_decoded[loc] = re.sub(regexp, uri_replace_decoded[loc], uri_decoded[loc]) + if loc == 2: + # Handle path manipulations + basename = None + if uri_decoded[0] != uri_replace_decoded[0] and ud.mirrortarball: + # If the source and destination url types differ, must be a mirrortarball mapping + basename = os.path.basename(ud.mirrortarball) + # Kill parameters, they make no sense for mirror tarballs + uri_decoded[5] = {} + elif ud.localpath and ud.method.supports_checksum(ud): + basename = os.path.basename(ud.localpath) + if basename and not result_decoded[loc].endswith(basename): + result_decoded[loc] = os.path.join(result_decoded[loc], basename) + else: + return None + result = encodeurl(result_decoded) + if result == ud.url: + return None + logger.debug(2, "For url %s returning %s" % (ud.url, result)) + return result + +methods = [] +urldata_cache = {} +saved_headrevs = {} + +def fetcher_init(d): + """ + Called to initialize the fetchers once the configuration data is known. + Calls before this must not hit the cache. 
+ """ + # When to drop SCM head revisions controlled by user policy + srcrev_policy = d.getVar('BB_SRCREV_POLICY', True) or "clear" + if srcrev_policy == "cache": + logger.debug(1, "Keeping SRCREV cache due to cache policy of: %s", srcrev_policy) + elif srcrev_policy == "clear": + logger.debug(1, "Clearing SRCREV cache due to cache policy of: %s", srcrev_policy) + revs = bb.persist_data.persist('BB_URI_HEADREVS', d) + try: + bb.fetch2.saved_headrevs = revs.items() + except: + pass + revs.clear() + else: + raise FetchError("Invalid SRCREV cache policy of: %s" % srcrev_policy) + + _checksum_cache.init_cache(d) + + for m in methods: + if hasattr(m, "init"): + m.init(d) + +def fetcher_parse_save(d): + _checksum_cache.save_extras(d) + +def fetcher_parse_done(d): + _checksum_cache.save_merge(d) + +def fetcher_compare_revisions(d): + """ + Compare the revisions in the persistant cache with current values and + return true/false on whether they've changed. + """ + + data = bb.persist_data.persist('BB_URI_HEADREVS', d).items() + data2 = bb.fetch2.saved_headrevs + + changed = False + for key in data: + if key not in data2 or data2[key] != data[key]: + logger.debug(1, "%s changed", key) + changed = True + return True + else: + logger.debug(2, "%s did not change", key) + return False + +def mirror_from_string(data): + return [ i.split() for i in (data or "").replace('\\n','\n').split('\n') if i ] + +def verify_checksum(ud, d): + """ + verify the MD5 and SHA256 checksum for downloaded src + + Raises a FetchError if one or both of the SRC_URI checksums do not match + the downloaded file, or if BB_STRICT_CHECKSUM is set and there are no + checksums specified. 
+ + """ + + if not ud.method.supports_checksum(ud): + return + + md5data = bb.utils.md5_file(ud.localpath) + sha256data = bb.utils.sha256_file(ud.localpath) + + if ud.method.recommends_checksum(ud): + # If strict checking enabled and neither sum defined, raise error + strict = d.getVar("BB_STRICT_CHECKSUM", True) or None + if strict and not (ud.md5_expected or ud.sha256_expected): + logger.error('No checksum specified for %s, please add at least one to the recipe:\n' + 'SRC_URI[%s] = "%s"\nSRC_URI[%s] = "%s"' % + (ud.localpath, ud.md5_name, md5data, + ud.sha256_name, sha256data)) + raise NoChecksumError('Missing SRC_URI checksum', ud.url) + + # Log missing sums so user can more easily add them + if not ud.md5_expected: + logger.warn('Missing md5 SRC_URI checksum for %s, consider adding to the recipe:\n' + 'SRC_URI[%s] = "%s"', + ud.localpath, ud.md5_name, md5data) + + if not ud.sha256_expected: + logger.warn('Missing sha256 SRC_URI checksum for %s, consider adding to the recipe:\n' + 'SRC_URI[%s] = "%s"', + ud.localpath, ud.sha256_name, sha256data) + + md5mismatch = False + sha256mismatch = False + + if ud.md5_expected != md5data: + md5mismatch = True + + if ud.sha256_expected != sha256data: + sha256mismatch = True + + # We want to alert the user if a checksum is defined in the recipe but + # it does not match. + msg = "" + mismatch = False + if md5mismatch and ud.md5_expected: + msg = msg + "\nFile: '%s' has %s checksum %s when %s was expected" % (ud.localpath, 'md5', md5data, ud.md5_expected) + mismatch = True; + + if sha256mismatch and ud.sha256_expected: + msg = msg + "\nFile: '%s' has %s checksum %s when %s was expected" % (ud.localpath, 'sha256', sha256data, ud.sha256_expected) + mismatch = True; + + if mismatch: + msg = msg + '\nIf this change is expected (e.g. 
you have upgraded to a new version without updating the checksums) then you can use these lines within the recipe:\nSRC_URI[%s] = "%s"\nSRC_URI[%s] = "%s"\nOtherwise you should retry the download and/or check with upstream to determine if the file has become corrupted or otherwise unexpectedly modified.\n' % (ud.md5_name, md5data, ud.sha256_name, sha256data) + + if len(msg): + raise ChecksumError('Checksum mismatch!%s' % msg, ud.url, md5data) + + +def update_stamp(ud, d): + """ + donestamp is file stamp indicating the whole fetching is done + this function update the stamp after verifying the checksum + """ + if os.path.exists(ud.donestamp): + # Touch the done stamp file to show active use of the download + try: + os.utime(ud.donestamp, None) + except: + # Errors aren't fatal here + pass + else: + verify_checksum(ud, d) + open(ud.donestamp, 'w').close() + +def subprocess_setup(): + # Python installs a SIGPIPE handler by default. This is usually not what + # non-Python subprocesses expect. + # SIGPIPE errors are known issues with gzip/bash + signal.signal(signal.SIGPIPE, signal.SIG_DFL) + +def get_autorev(d): + # only not cache src rev in autorev case + if d.getVar('BB_SRCREV_POLICY', True) != "cache": + d.setVar('__BB_DONT_CACHE', '1') + return "AUTOINC" + +def get_srcrev(d): + """ + Return the version string for the current package + (usually to be used as PV) + Most packages usually only have one SCM so we just pass on the call. + In the multi SCM case, we build a value based on SRCREV_FORMAT which must + have been set. 
+ """ + + scms = [] + fetcher = Fetch(d.getVar('SRC_URI', True).split(), d) + urldata = fetcher.ud + for u in urldata: + if urldata[u].method.supports_srcrev(): + scms.append(u) + + if len(scms) == 0: + raise FetchError("SRCREV was used yet no valid SCM was found in SRC_URI") + + if len(scms) == 1 and len(urldata[scms[0]].names) == 1: + autoinc, rev = urldata[scms[0]].method.sortable_revision(urldata[scms[0]], d, urldata[scms[0]].names[0]) + if len(rev) > 10: + rev = rev[:10] + if autoinc: + return "AUTOINC+" + rev + return rev + + # + # Mutiple SCMs are in SRC_URI so we resort to SRCREV_FORMAT + # + format = d.getVar('SRCREV_FORMAT', True) + if not format: + raise FetchError("The SRCREV_FORMAT variable must be set when multiple SCMs are used.") + + seenautoinc = False + for scm in scms: + ud = urldata[scm] + for name in ud.names: + autoinc, rev = ud.method.sortable_revision(ud, d, name) + seenautoinc = seenautoinc or autoinc + if len(rev) > 10: + rev = rev[:10] + format = format.replace(name, rev) + if seenautoinc: + format = "AUTOINC+" + format + + return format + +def localpath(url, d): + fetcher = bb.fetch2.Fetch([url], d) + return fetcher.localpath(url) + +def runfetchcmd(cmd, d, quiet = False, cleanup = []): + """ + Run cmd returning the command output + Raise an error if interrupted or cmd fails + Optionally echo command output to stdout + Optionally remove the files/directories listed in cleanup upon failure + """ + + # Need to export PATH as binary could be in metadata paths + # rather than host provided + # Also include some other variables. + # FIXME: Should really include all export varaiables? 
+ exportvars = ['HOME', 'PATH', + 'HTTP_PROXY', 'http_proxy', + 'HTTPS_PROXY', 'https_proxy', + 'FTP_PROXY', 'ftp_proxy', + 'FTPS_PROXY', 'ftps_proxy', + 'NO_PROXY', 'no_proxy', + 'ALL_PROXY', 'all_proxy', + 'GIT_PROXY_COMMAND', + 'SSH_AUTH_SOCK', 'SSH_AGENT_PID', + 'SOCKS5_USER', 'SOCKS5_PASSWD'] + + for var in exportvars: + val = d.getVar(var, True) + if val: + cmd = 'export ' + var + '=\"%s\"; %s' % (val, cmd) + + logger.debug(1, "Running %s", cmd) + + success = False + error_message = "" + + try: + (output, errors) = bb.process.run(cmd, shell=True, stderr=subprocess.PIPE) + success = True + except bb.process.NotFoundError as e: + error_message = "Fetch command %s" % (e.command) + except bb.process.ExecutionError as e: + if e.stdout: + output = "output:\n%s\n%s" % (e.stdout, e.stderr) + elif e.stderr: + output = "output:\n%s" % e.stderr + else: + output = "no output" + error_message = "Fetch command failed with exit code %s, %s" % (e.exitcode, output) + except bb.process.CmdError as e: + error_message = "Fetch command %s could not be run:\n%s" % (e.command, e.msg) + if not success: + for f in cleanup: + try: + bb.utils.remove(f, True) + except OSError: + pass + + raise FetchError(error_message) + + return output + +def check_network_access(d, info = "", url = None): + """ + log remote network access, and error if BB_NO_NETWORK is set + """ + if d.getVar("BB_NO_NETWORK", True) == "1": + raise NetworkAccess(url, info) + else: + logger.debug(1, "Fetcher accessed the network with the command %s" % info) + +def build_mirroruris(origud, mirrors, ld): + uris = [] + uds = [] + + replacements = {} + replacements["TYPE"] = origud.type + replacements["HOST"] = origud.host + replacements["PATH"] = origud.path + replacements["BASENAME"] = origud.path.split("/")[-1] + replacements["MIRRORNAME"] = origud.host.replace(':','.') + origud.path.replace('/', '.').replace('*', '.') + + def adduri(ud, uris, uds): + for line in mirrors: + try: + (find, replace) = line + except 
ValueError: + continue + newuri = uri_replace(ud, find, replace, replacements, ld) + if not newuri or newuri in uris or newuri == origud.url: + continue + try: + newud = FetchData(newuri, ld) + newud.setup_localpath(ld) + except bb.fetch2.BBFetchException as e: + logger.debug(1, "Mirror fetch failure for url %s (original url: %s)" % (newuri, origud.url)) + logger.debug(1, str(e)) + try: + ud.method.clean(ud, ld) + except UnboundLocalError: + pass + continue + uris.append(newuri) + uds.append(newud) + + adduri(newud, uris, uds) + + adduri(origud, uris, uds) + + return uris, uds + +def rename_bad_checksum(ud, suffix): + """ + Renames files to have suffix from parameter + """ + + if ud.localpath is None: + return + + new_localpath = "%s_bad-checksum_%s" % (ud.localpath, suffix) + bb.warn("Renaming %s to %s" % (ud.localpath, new_localpath)) + bb.utils.movefile(ud.localpath, new_localpath) + + +def try_mirror_url(origud, ud, ld, check = False): + # Return of None or a value means we're finished + # False means try another url + try: + if check: + found = ud.method.checkstatus(ud, ld) + if found: + return found + return False + + os.chdir(ld.getVar("DL_DIR", True)) + + if not os.path.exists(ud.donestamp) or ud.method.need_update(ud, ld): + ud.method.download(ud, ld) + if hasattr(ud.method,"build_mirror_data"): + ud.method.build_mirror_data(ud, ld) + + if not ud.localpath or not os.path.exists(ud.localpath): + return False + + if ud.localpath == origud.localpath: + return ud.localpath + + # We may be obtaining a mirror tarball which needs further processing by the real fetcher + # If that tarball is a local file:// we need to provide a symlink to it + dldir = ld.getVar("DL_DIR", True) + if origud.mirrortarball and os.path.basename(ud.localpath) == os.path.basename(origud.mirrortarball) \ + and os.path.basename(ud.localpath) != os.path.basename(origud.localpath): + bb.utils.mkdirhier(os.path.dirname(ud.donestamp)) + open(ud.donestamp, 'w').close() + dest = 
os.path.join(dldir, os.path.basename(ud.localpath)) + if not os.path.exists(dest): + os.symlink(ud.localpath, dest) + if not os.path.exists(origud.donestamp) or origud.method.need_update(origud, ld): + origud.method.download(origud, ld) + if hasattr(origud.method,"build_mirror_data"): + origud.method.build_mirror_data(origud, ld) + return ud.localpath + # Otherwise the result is a local file:// and we symlink to it + if not os.path.exists(origud.localpath): + if os.path.islink(origud.localpath): + # Broken symbolic link + os.unlink(origud.localpath) + + os.symlink(ud.localpath, origud.localpath) + update_stamp(origud, ld) + return ud.localpath + + except bb.fetch2.NetworkAccess: + raise + + except bb.fetch2.BBFetchException as e: + if isinstance(e, ChecksumError): + logger.warn("Mirror checksum failure for url %s (original url: %s)\nCleaning and trying again." % (ud.url, origud.url)) + logger.warn(str(e)) + rename_bad_checksum(ud, e.checksum) + elif isinstance(e, NoChecksumError): + raise + else: + logger.debug(1, "Mirror fetch failure for url %s (original url: %s)" % (ud.url, origud.url)) + logger.debug(1, str(e)) + try: + ud.method.clean(ud, ld) + except UnboundLocalError: + pass + return False + +def try_mirrors(d, origud, mirrors, check = False): + """ + Try to use a mirrored version of the sources. + This method will be automatically called before the fetchers go. 
+ + d Is a bb.data instance + uri is the original uri we're trying to download + mirrors is the list of mirrors we're going to try + """ + ld = d.createCopy() + + uris, uds = build_mirroruris(origud, mirrors, ld) + + for index, uri in enumerate(uris): + ret = try_mirror_url(origud, uds[index], ld, check) + if ret != False: + return ret + return None + +def srcrev_internal_helper(ud, d, name): + """ + Return: + a) a source revision if specified + b) latest revision if SRCREV="AUTOINC" + c) None if not specified + """ + + srcrev = None + pn = d.getVar("PN", True) + attempts = [] + if name != '' and pn: + attempts.append("SRCREV_%s_pn-%s" % (name, pn)) + if name != '': + attempts.append("SRCREV_%s" % name) + if pn: + attempts.append("SRCREV_pn-%s" % pn) + attempts.append("SRCREV") + + for a in attempts: + srcrev = d.getVar(a, True) + if srcrev and srcrev != "INVALID": + break + + if 'rev' in ud.parm and 'tag' in ud.parm: + raise FetchError("Please specify a ;rev= parameter or a ;tag= parameter in the url %s but not both." 
% (ud.url)) + + if 'rev' in ud.parm or 'tag' in ud.parm: + if 'rev' in ud.parm: + parmrev = ud.parm['rev'] + else: + parmrev = ud.parm['tag'] + if srcrev == "INVALID" or not srcrev: + return parmrev + if srcrev != parmrev: + raise FetchError("Conflicting revisions (%s from SRCREV and %s from the url) found, please spcify one valid value" % (srcrev, parmrev)) + return parmrev + + if srcrev == "INVALID" or not srcrev: + raise FetchError("Please set a valid SRCREV for url %s (possible key names are %s, or use a ;rev=X URL parameter)" % (str(attempts), ud.url), ud.url) + if srcrev == "AUTOINC": + srcrev = ud.method.latest_revision(ud, d, name) + + return srcrev + +def get_checksum_file_list(d): + """ Get a list of files checksum in SRC_URI + + Returns the resolved local paths of all local file entries in + SRC_URI as a space-separated string + """ + fetch = Fetch([], d, cache = False, localonly = True) + + dl_dir = d.getVar('DL_DIR', True) + filelist = [] + for u in fetch.urls: + ud = fetch.ud[u] + + if ud and isinstance(ud.method, local.Local): + ud.setup_localpath(d) + f = ud.localpath + pth = ud.decodedurl + if '*' in pth: + f = os.path.join(os.path.abspath(f), pth) + if f.startswith(dl_dir): + # The local fetcher's behaviour is to return a path under DL_DIR if it couldn't find the file anywhere else + if os.path.exists(f): + bb.warn("Getting checksum for %s SRC_URI entry %s: file not found except in DL_DIR" % (d.getVar('PN', True), os.path.basename(f))) + else: + bb.warn("Unable to get checksum for %s SRC_URI entry %s: file could not be found" % (d.getVar('PN', True), os.path.basename(f))) + filelist.append(f) + + return " ".join(filelist) + + +def get_file_checksums(filelist, pn): + """Get a list of the checksums for a list of local files + + Returns the checksums for a list of local files, caching the results as + it proceeds + + """ + + def checksum_file(f): + try: + checksum = _checksum_cache.get_checksum(f) + except OSError as e: + bb.warn("Unable to get 
checksum for %s SRC_URI entry %s: %s" % (pn, os.path.basename(f), e)) + return None + return checksum + + def checksum_dir(pth): + # Handle directories recursively + dirchecksums = [] + for root, dirs, files in os.walk(pth): + for name in files: + fullpth = os.path.join(root, name) + checksum = checksum_file(fullpth) + if checksum: + dirchecksums.append((fullpth, checksum)) + return dirchecksums + + checksums = [] + for pth in filelist.split(): + checksum = None + if '*' in pth: + # Handle globs + for f in glob.glob(pth): + if os.path.isdir(f): + checksums.extend(checksum_dir(f)) + else: + checksum = checksum_file(f) + if checksum: + checksums.append((f, checksum)) + continue + elif os.path.isdir(pth): + checksums.extend(checksum_dir(pth)) + continue + else: + checksum = checksum_file(pth) + + if checksum: + checksums.append((pth, checksum)) + + checksums.sort(key=operator.itemgetter(1)) + return checksums + + +class FetchData(object): + """ + A class which represents the fetcher state for a given URI. + """ + def __init__(self, url, d, localonly = False): + # localpath is the location of a downloaded result. If not set, the file is local. 
+ self.donestamp = None + self.localfile = "" + self.localpath = None + self.lockfile = None + self.mirrortarball = None + self.basename = None + self.basepath = None + (self.type, self.host, self.path, self.user, self.pswd, self.parm) = decodeurl(data.expand(url, d)) + self.date = self.getSRCDate(d) + self.url = url + if not self.user and "user" in self.parm: + self.user = self.parm["user"] + if not self.pswd and "pswd" in self.parm: + self.pswd = self.parm["pswd"] + self.setup = False + + if "name" in self.parm: + self.md5_name = "%s.md5sum" % self.parm["name"] + self.sha256_name = "%s.sha256sum" % self.parm["name"] + else: + self.md5_name = "md5sum" + self.sha256_name = "sha256sum" + if self.md5_name in self.parm: + self.md5_expected = self.parm[self.md5_name] + elif self.type not in ["http", "https", "ftp", "ftps", "sftp"]: + self.md5_expected = None + else: + self.md5_expected = d.getVarFlag("SRC_URI", self.md5_name) + if self.sha256_name in self.parm: + self.sha256_expected = self.parm[self.sha256_name] + elif self.type not in ["http", "https", "ftp", "ftps", "sftp"]: + self.sha256_expected = None + else: + self.sha256_expected = d.getVarFlag("SRC_URI", self.sha256_name) + + self.names = self.parm.get("name",'default').split(',') + + self.method = None + for m in methods: + if m.supports(self, d): + self.method = m + break + + if not self.method: + raise NoMethodError(url) + + if localonly and not isinstance(self.method, local.Local): + raise NonLocalMethod() + + if self.parm.get("proto", None) and "protocol" not in self.parm: + logger.warn('Consider updating %s recipe to use "protocol" not "proto" in SRC_URI.', d.getVar('PN', True)) + self.parm["protocol"] = self.parm.get("proto", None) + + if hasattr(self.method, "urldata_init"): + self.method.urldata_init(self, d) + + if "localpath" in self.parm: + # if user sets localpath for file, use it instead. 
+ self.localpath = self.parm["localpath"] + self.basename = os.path.basename(self.localpath) + elif self.localfile: + self.localpath = self.method.localpath(self, d) + + dldir = d.getVar("DL_DIR", True) + # Note: .done and .lock files should always be in DL_DIR whereas localpath may not be. + if self.localpath and self.localpath.startswith(dldir): + basepath = self.localpath + elif self.localpath: + basepath = dldir + os.sep + os.path.basename(self.localpath) + else: + basepath = dldir + os.sep + (self.basepath or self.basename) + self.donestamp = basepath + '.done' + self.lockfile = basepath + '.lock' + + def setup_revisons(self, d): + self.revisions = {} + for name in self.names: + self.revisions[name] = srcrev_internal_helper(self, d, name) + + # add compatibility code for non name specified case + if len(self.names) == 1: + self.revision = self.revisions[self.names[0]] + + def setup_localpath(self, d): + if not self.localpath: + self.localpath = self.method.localpath(self, d) + + def getSRCDate(self, d): + """ + Return the SRC Date for the component + + d the bb.data module + """ + if "srcdate" in self.parm: + return self.parm['srcdate'] + + pn = d.getVar("PN", True) + + if pn: + return d.getVar("SRCDATE_%s" % pn, True) or d.getVar("SRCDATE", True) or d.getVar("DATE", True) + + return d.getVar("SRCDATE", True) or d.getVar("DATE", True) + +class FetchMethod(object): + """Base class for 'fetch'ing data""" + + def __init__(self, urls = []): + self.urls = [] + + def supports(self, urldata, d): + """ + Check to see if this fetch class supports a given url. + """ + return 0 + + def localpath(self, urldata, d): + """ + Return the local filename of a given url assuming a successful fetch. 
+ Can also setup variables in urldata for use in go (saving code duplication + and duplicate code execution) + """ + return os.path.join(data.getVar("DL_DIR", d, True), urldata.localfile) + + def supports_checksum(self, urldata): + """ + Is localpath something that can be represented by a checksum? + """ + + # We cannot compute checksums for directories + if os.path.isdir(urldata.localpath) == True: + return False + if urldata.localpath.find("*") != -1: + return False + + return True + + def recommends_checksum(self, urldata): + """ + Is the backend on where checksumming is recommended (should warnings + be displayed if there is no checksum)? + """ + return False + + def _strip_leading_slashes(self, relpath): + """ + Remove leading slash as os.path.join can't cope + """ + while os.path.isabs(relpath): + relpath = relpath[1:] + return relpath + + def setUrls(self, urls): + self.__urls = urls + + def getUrls(self): + return self.__urls + + urls = property(getUrls, setUrls, None, "Urls property") + + def need_update(self, ud, d): + """ + Force a fetch, even if localpath exists? 
+ """ + if os.path.exists(ud.localpath): + return False + return True + + def supports_srcrev(self): + """ + The fetcher supports auto source revisions (SRCREV) + """ + return False + + def download(self, urldata, d): + """ + Fetch urls + Assumes localpath was called first + """ + raise NoMethodError(url) + + def unpack(self, urldata, rootdir, data): + iterate = False + file = urldata.localpath + + try: + unpack = bb.utils.to_boolean(urldata.parm.get('unpack'), True) + except ValueError as exc: + bb.fatal("Invalid value for 'unpack' parameter for %s: %s" % + (file, urldata.parm.get('unpack'))) + + dots = file.split(".") + if dots[-1] in ['gz', 'bz2', 'Z', 'xz']: + efile = os.path.join(rootdir, os.path.basename('.'.join(dots[0:-1]))) + else: + efile = file + cmd = None + + if unpack: + if file.endswith('.tar'): + cmd = 'tar x --no-same-owner -f %s' % file + elif file.endswith('.tgz') or file.endswith('.tar.gz') or file.endswith('.tar.Z'): + cmd = 'tar xz --no-same-owner -f %s' % file + elif file.endswith('.tbz') or file.endswith('.tbz2') or file.endswith('.tar.bz2'): + cmd = 'bzip2 -dc %s | tar x --no-same-owner -f -' % file + elif file.endswith('.gz') or file.endswith('.Z') or file.endswith('.z'): + cmd = 'gzip -dc %s > %s' % (file, efile) + elif file.endswith('.bz2'): + cmd = 'bzip2 -dc %s > %s' % (file, efile) + elif file.endswith('.tar.xz'): + cmd = 'xz -dc %s | tar x --no-same-owner -f -' % file + elif file.endswith('.xz'): + cmd = 'xz -dc %s > %s' % (file, efile) + elif file.endswith('.zip') or file.endswith('.jar'): + try: + dos = bb.utils.to_boolean(urldata.parm.get('dos'), False) + except ValueError as exc: + bb.fatal("Invalid value for 'dos' parameter for %s: %s" % + (file, urldata.parm.get('dos'))) + cmd = 'unzip -q -o' + if dos: + cmd = '%s -a' % cmd + cmd = "%s '%s'" % (cmd, file) + elif file.endswith('.rpm') or file.endswith('.srpm'): + if 'extract' in urldata.parm: + unpack_file = urldata.parm.get('extract') + cmd = 'rpm2cpio.sh %s | cpio -id %s' % 
(file, unpack_file) + iterate = True + iterate_file = unpack_file + else: + cmd = 'rpm2cpio.sh %s | cpio -id' % (file) + elif file.endswith('.deb') or file.endswith('.ipk'): + cmd = 'ar -p %s data.tar.gz | zcat | tar --no-same-owner -xpf -' % file + + if not unpack or not cmd: + # If file == dest, then avoid any copies, as we already put the file into dest! + dest = os.path.join(rootdir, os.path.basename(file)) + if (file != dest) and not (os.path.exists(dest) and os.path.samefile(file, dest)): + if os.path.isdir(file): + # If for example we're asked to copy file://foo/bar, we need to unpack the result into foo/bar + basepath = getattr(urldata, "basepath", None) + destdir = "." + if basepath and basepath.endswith("/"): + basepath = basepath.rstrip("/") + elif basepath: + basepath = os.path.dirname(basepath) + if basepath and basepath.find("/") != -1: + destdir = basepath[:basepath.rfind('/')] + destdir = destdir.strip('/') + if destdir != "." and not os.access("%s/%s" % (rootdir, destdir), os.F_OK): + os.makedirs("%s/%s" % (rootdir, destdir)) + cmd = 'cp -pPR %s %s/%s/' % (file, rootdir, destdir) + #cmd = 'tar -cf - -C "%d" -ps . | tar -xf - -C "%s/%s/"' % (file, rootdir, destdir) + else: + # The "destdir" handling was specifically done for FILESPATH + # items. So, only do so for file:// entries. + if urldata.type == "file" and urldata.path.find("/") != -1: + destdir = urldata.path.rsplit("/", 1)[0] + else: + destdir = "." 
+ bb.utils.mkdirhier("%s/%s" % (rootdir, destdir)) + cmd = 'cp %s %s/%s/' % (file, rootdir, destdir) + + if not cmd: + return + + # Change to subdir before executing command + save_cwd = os.getcwd(); + os.chdir(rootdir) + if 'subdir' in urldata.parm: + newdir = ("%s/%s" % (rootdir, urldata.parm.get('subdir'))) + bb.utils.mkdirhier(newdir) + os.chdir(newdir) + + path = data.getVar('PATH', True) + if path: + cmd = "PATH=\"%s\" %s" % (path, cmd) + bb.note("Unpacking %s to %s/" % (file, os.getcwd())) + ret = subprocess.call(cmd, preexec_fn=subprocess_setup, shell=True) + + os.chdir(save_cwd) + + if ret != 0: + raise UnpackError("Unpack command %s failed with return value %s" % (cmd, ret), urldata.url) + + if iterate is True: + iterate_urldata = urldata + iterate_urldata.localpath = "%s/%s" % (rootdir, iterate_file) + self.unpack(urldata, rootdir, data) + + return + + def clean(self, urldata, d): + """ + Clean any existing full or partial download + """ + bb.utils.remove(urldata.localpath) + + def try_premirror(self, urldata, d): + """ + Should premirrors be used? + """ + return True + + def checkstatus(self, urldata, d): + """ + Check the status of a URL + Assumes localpath was called first + """ + logger.info("URL %s could not be checked for status since no method exists.", url) + return True + + def latest_revision(self, ud, d, name): + """ + Look in the cache for the latest revision, if not present ask the SCM. 
+ """ + if not hasattr(self, "_latest_revision"): + raise ParameterError("The fetcher for this URL does not support _latest_revision", url) + + revs = bb.persist_data.persist('BB_URI_HEADREVS', d) + key = self.generate_revision_key(ud, d, name) + try: + return revs[key] + except KeyError: + revs[key] = rev = self._latest_revision(ud, d, name) + return rev + + def sortable_revision(self, ud, d, name): + latest_rev = self._build_revision(ud, d, name) + return True, str(latest_rev) + + def generate_revision_key(self, ud, d, name): + key = self._revision_key(ud, d, name) + return "%s-%s" % (key, d.getVar("PN", True) or "") + +class Fetch(object): + def __init__(self, urls, d, cache = True, localonly = False): + if localonly and cache: + raise Exception("bb.fetch2.Fetch.__init__: cannot set cache and localonly at same time") + + if len(urls) == 0: + urls = d.getVar("SRC_URI", True).split() + self.urls = urls + self.d = d + self.ud = {} + + fn = d.getVar('FILE', True) + if cache and fn and fn in urldata_cache: + self.ud = urldata_cache[fn] + + for url in urls: + if url not in self.ud: + try: + self.ud[url] = FetchData(url, d, localonly) + except NonLocalMethod: + if localonly: + self.ud[url] = None + pass + + if fn and cache: + urldata_cache[fn] = self.ud + + def localpath(self, url): + if url not in self.urls: + self.ud[url] = FetchData(url, self.d) + + self.ud[url].setup_localpath(self.d) + return self.d.expand(self.ud[url].localpath) + + def localpaths(self): + """ + Return a list of the local filenames, assuming successful fetch + """ + local = [] + + for u in self.urls: + ud = self.ud[u] + ud.setup_localpath(self.d) + local.append(ud.localpath) + + return local + + def download(self, urls = []): + """ + Fetch all urls + """ + if len(urls) == 0: + urls = self.urls + + network = self.d.getVar("BB_NO_NETWORK", True) + premirroronly = (self.d.getVar("BB_FETCH_PREMIRRORONLY", True) == "1") + + for u in urls: + ud = self.ud[u] + ud.setup_localpath(self.d) + m = ud.method 
+ localpath = "" + + lf = bb.utils.lockfile(ud.lockfile) + + try: + self.d.setVar("BB_NO_NETWORK", network) + + if os.path.exists(ud.donestamp) and not m.need_update(ud, self.d): + localpath = ud.localpath + elif m.try_premirror(ud, self.d): + logger.debug(1, "Trying PREMIRRORS") + mirrors = mirror_from_string(self.d.getVar('PREMIRRORS', True)) + localpath = try_mirrors(self.d, ud, mirrors, False) + + if premirroronly: + self.d.setVar("BB_NO_NETWORK", "1") + + os.chdir(self.d.getVar("DL_DIR", True)) + + firsterr = None + if not localpath and ((not os.path.exists(ud.donestamp)) or m.need_update(ud, self.d)): + try: + logger.debug(1, "Trying Upstream") + m.download(ud, self.d) + if hasattr(m, "build_mirror_data"): + m.build_mirror_data(ud, self.d) + localpath = ud.localpath + # early checksum verify, so that if checksum mismatched, + # fetcher still have chance to fetch from mirror + update_stamp(ud, self.d) + + except bb.fetch2.NetworkAccess: + raise + + except BBFetchException as e: + if isinstance(e, ChecksumError): + logger.warn("Checksum failure encountered with download of %s - will attempt other sources if available" % u) + logger.debug(1, str(e)) + rename_bad_checksum(ud, e.checksum) + elif isinstance(e, NoChecksumError): + raise + else: + logger.warn('Failed to fetch URL %s, attempting MIRRORS if available' % u) + logger.debug(1, str(e)) + firsterr = e + # Remove any incomplete fetch + m.clean(ud, self.d) + logger.debug(1, "Trying MIRRORS") + mirrors = mirror_from_string(self.d.getVar('MIRRORS', True)) + localpath = try_mirrors (self.d, ud, mirrors) + + if not localpath or ((not os.path.exists(localpath)) and localpath.find("*") == -1): + if firsterr: + logger.error(str(firsterr)) + raise FetchError("Unable to fetch URL from any source.", u) + + update_stamp(ud, self.d) + + except BBFetchException as e: + if isinstance(e, ChecksumError): + logger.error("Checksum failure fetching %s" % u) + raise + + finally: + bb.utils.unlockfile(lf) + + def 
checkstatus(self, urls = []): + """ + Check all urls exist upstream + """ + + if len(urls) == 0: + urls = self.urls + + for u in urls: + ud = self.ud[u] + ud.setup_localpath(self.d) + m = ud.method + logger.debug(1, "Testing URL %s", u) + # First try checking uri, u, from PREMIRRORS + mirrors = mirror_from_string(self.d.getVar('PREMIRRORS', True)) + ret = try_mirrors(self.d, ud, mirrors, True) + if not ret: + # Next try checking from the original uri, u + try: + ret = m.checkstatus(ud, self.d) + except: + # Finally, try checking uri, u, from MIRRORS + mirrors = mirror_from_string(self.d.getVar('MIRRORS', True)) + ret = try_mirrors(self.d, ud, mirrors, True) + + if not ret: + raise FetchError("URL %s doesn't work" % u, u) + + def unpack(self, root, urls = []): + """ + Check all urls exist upstream + """ + + if len(urls) == 0: + urls = self.urls + + for u in urls: + ud = self.ud[u] + ud.setup_localpath(self.d) + + if self.d.expand(self.localpath) is None: + continue + + if ud.lockfile: + lf = bb.utils.lockfile(ud.lockfile) + + ud.method.unpack(ud, root, self.d) + + if ud.lockfile: + bb.utils.unlockfile(lf) + + def clean(self, urls = []): + """ + Clean files that the fetcher gets or places + """ + + if len(urls) == 0: + urls = self.urls + + for url in urls: + if url not in self.ud: + self.ud[url] = FetchData(url, d) + ud = self.ud[url] + ud.setup_localpath(self.d) + + if not ud.localfile and ud.localpath is None: + continue + + if ud.lockfile: + lf = bb.utils.lockfile(ud.lockfile) + + ud.method.clean(ud, self.d) + if ud.donestamp: + bb.utils.remove(ud.donestamp) + + if ud.lockfile: + bb.utils.unlockfile(lf) + +from . import cvs +from . import git +from . import gitsm +from . import gitannex +from . import local +from . import svn +from . import wget +from . import ssh +from . import sftp +from . import perforce +from . import bzr +from . import hg +from . import osc +from . 
import repo + +methods.append(local.Local()) +methods.append(wget.Wget()) +methods.append(svn.Svn()) +methods.append(git.Git()) +methods.append(gitsm.GitSM()) +methods.append(gitannex.GitANNEX()) +methods.append(cvs.Cvs()) +methods.append(ssh.SSH()) +methods.append(sftp.SFTP()) +methods.append(perforce.Perforce()) +methods.append(bzr.Bzr()) +methods.append(hg.Hg()) +methods.append(osc.Osc()) +methods.append(repo.Repo()) diff --git a/bitbake/lib/bb/fetch2/bzr.py b/bitbake/lib/bb/fetch2/bzr.py new file mode 100644 index 0000000000..03e9ac461b --- /dev/null +++ b/bitbake/lib/bb/fetch2/bzr.py @@ -0,0 +1,143 @@ +""" +BitBake 'Fetch' implementation for bzr. + +""" + +# Copyright (C) 2007 Ross Burton +# Copyright (C) 2007 Richard Purdie +# +# Classes for obtaining upstream sources for the +# BitBake build tools. +# Copyright (C) 2003, 2004 Chris Larson +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ +import os +import sys +import logging +import bb +from bb import data +from bb.fetch2 import FetchMethod +from bb.fetch2 import FetchError +from bb.fetch2 import runfetchcmd +from bb.fetch2 import logger + +class Bzr(FetchMethod): + def supports(self, ud, d): + return ud.type in ['bzr'] + + def urldata_init(self, ud, d): + """ + init bzr specific variable within url data + """ + # Create paths to bzr checkouts + relpath = self._strip_leading_slashes(ud.path) + ud.pkgdir = os.path.join(data.expand('${BZRDIR}', d), ud.host, relpath) + + ud.setup_revisons(d) + + if not ud.revision: + ud.revision = self.latest_revision(ud, d) + + ud.localfile = data.expand('bzr_%s_%s_%s.tar.gz' % (ud.host, ud.path.replace('/', '.'), ud.revision), d) + + def _buildbzrcommand(self, ud, d, command): + """ + Build up an bzr commandline based on ud + command is "fetch", "update", "revno" + """ + + basecmd = data.expand('${FETCHCMD_bzr}', d) + + proto = ud.parm.get('protocol', 'http') + + bzrroot = ud.host + ud.path + + options = [] + + if command == "revno": + bzrcmd = "%s revno %s %s://%s" % (basecmd, " ".join(options), proto, bzrroot) + else: + if ud.revision: + options.append("-r %s" % ud.revision) + + if command == "fetch": + bzrcmd = "%s branch %s %s://%s" % (basecmd, " ".join(options), proto, bzrroot) + elif command == "update": + bzrcmd = "%s pull %s --overwrite" % (basecmd, " ".join(options)) + else: + raise FetchError("Invalid bzr command %s" % command, ud.url) + + return bzrcmd + + def download(self, ud, d): + """Fetch url""" + + if os.access(os.path.join(ud.pkgdir, os.path.basename(ud.pkgdir), '.bzr'), os.R_OK): + bzrcmd = self._buildbzrcommand(ud, d, "update") + logger.debug(1, "BZR Update %s", ud.url) + bb.fetch2.check_network_access(d, bzrcmd, ud.url) + os.chdir(os.path.join (ud.pkgdir, os.path.basename(ud.path))) + runfetchcmd(bzrcmd, d) + else: + bb.utils.remove(os.path.join(ud.pkgdir, os.path.basename(ud.pkgdir)), True) + bzrcmd = self._buildbzrcommand(ud, d, "fetch") + 
bb.fetch2.check_network_access(d, bzrcmd, ud.url) + logger.debug(1, "BZR Checkout %s", ud.url) + bb.utils.mkdirhier(ud.pkgdir) + os.chdir(ud.pkgdir) + logger.debug(1, "Running %s", bzrcmd) + runfetchcmd(bzrcmd, d) + + os.chdir(ud.pkgdir) + + scmdata = ud.parm.get("scmdata", "") + if scmdata == "keep": + tar_flags = "" + else: + tar_flags = "--exclude '.bzr' --exclude '.bzrtags'" + + # tar them up to a defined filename + runfetchcmd("tar %s -czf %s %s" % (tar_flags, ud.localpath, os.path.basename(ud.pkgdir)), d, cleanup = [ud.localpath]) + + def supports_srcrev(self): + return True + + def _revision_key(self, ud, d, name): + """ + Return a unique key for the url + """ + return "bzr:" + ud.pkgdir + + def _latest_revision(self, ud, d, name): + """ + Return the latest upstream revision number + """ + logger.debug(2, "BZR fetcher hitting network for %s", ud.url) + + bb.fetch2.check_network_access(d, self._buildbzrcommand(ud, d, "revno"), ud.url) + + output = runfetchcmd(self._buildbzrcommand(ud, d, "revno"), d, True) + + return output.strip() + + def sortable_revision(self, ud, d, name): + """ + Return a sortable revision number which in our case is the revision number + """ + + return False, self._build_revision(ud, d) + + def _build_revision(self, ud, d): + return ud.revision diff --git a/bitbake/lib/bb/fetch2/cvs.py b/bitbake/lib/bb/fetch2/cvs.py new file mode 100644 index 0000000000..d27d96f68c --- /dev/null +++ b/bitbake/lib/bb/fetch2/cvs.py @@ -0,0 +1,171 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +""" +BitBake 'Fetch' implementations + +Classes for obtaining upstream sources for the +BitBake build tools. + +""" + +# Copyright (C) 2003, 2004 Chris Larson +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +#Based on functions from the base bb module, Copyright 2003 Holger Schurig +# + +import os +import logging +import bb +from bb.fetch2 import FetchMethod, FetchError, MissingParameterError, logger +from bb.fetch2 import runfetchcmd + +class Cvs(FetchMethod): + """ + Class to fetch a module or modules from cvs repositories + """ + def supports(self, ud, d): + """ + Check to see if a given url can be fetched with cvs. + """ + return ud.type in ['cvs'] + + def urldata_init(self, ud, d): + if not "module" in ud.parm: + raise MissingParameterError("module", ud.url) + ud.module = ud.parm["module"] + + ud.tag = ud.parm.get('tag', "") + + # Override the default date in certain cases + if 'date' in ud.parm: + ud.date = ud.parm['date'] + elif ud.tag: + ud.date = "" + + norecurse = '' + if 'norecurse' in ud.parm: + norecurse = '_norecurse' + + fullpath = '' + if 'fullpath' in ud.parm: + fullpath = '_fullpath' + + ud.localfile = bb.data.expand('%s_%s_%s_%s%s%s.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.tag, ud.date, norecurse, fullpath), d) + + def need_update(self, ud, d): + if (ud.date == "now"): + return True + if not os.path.exists(ud.localpath): + return True + return False + + def download(self, ud, d): + + method = ud.parm.get('method', 'pserver') + localdir = ud.parm.get('localdir', ud.module) + cvs_port = ud.parm.get('port', '') + + cvs_rsh = None + if method == "ext": + if "rsh" in ud.parm: + cvs_rsh = ud.parm["rsh"] + + if method == "dir": + cvsroot = ud.path + else: + cvsroot = ":" + method + 
cvsproxyhost = d.getVar('CVS_PROXY_HOST', True) + if cvsproxyhost: + cvsroot += ";proxy=" + cvsproxyhost + cvsproxyport = d.getVar('CVS_PROXY_PORT', True) + if cvsproxyport: + cvsroot += ";proxyport=" + cvsproxyport + cvsroot += ":" + ud.user + if ud.pswd: + cvsroot += ":" + ud.pswd + cvsroot += "@" + ud.host + ":" + cvs_port + ud.path + + options = [] + if 'norecurse' in ud.parm: + options.append("-l") + if ud.date: + # treat YYYYMMDDHHMM specially for CVS + if len(ud.date) == 12: + options.append("-D \"%s %s:%s UTC\"" % (ud.date[0:8], ud.date[8:10], ud.date[10:12])) + else: + options.append("-D \"%s UTC\"" % ud.date) + if ud.tag: + options.append("-r %s" % ud.tag) + + cvsbasecmd = d.getVar("FETCHCMD_cvs", True) + cvscmd = cvsbasecmd + " '-d" + cvsroot + "' co " + " ".join(options) + " " + ud.module + cvsupdatecmd = cvsbasecmd + " '-d" + cvsroot + "' update -d -P " + " ".join(options) + + if cvs_rsh: + cvscmd = "CVS_RSH=\"%s\" %s" % (cvs_rsh, cvscmd) + cvsupdatecmd = "CVS_RSH=\"%s\" %s" % (cvs_rsh, cvsupdatecmd) + + # create module directory + logger.debug(2, "Fetch: checking for module directory") + pkg = d.getVar('PN', True) + pkgdir = os.path.join(d.getVar('CVSDIR', True), pkg) + moddir = os.path.join(pkgdir, localdir) + if os.access(os.path.join(moddir, 'CVS'), os.R_OK): + logger.info("Update " + ud.url) + bb.fetch2.check_network_access(d, cvsupdatecmd, ud.url) + # update sources there + os.chdir(moddir) + cmd = cvsupdatecmd + else: + logger.info("Fetch " + ud.url) + # check out sources there + bb.utils.mkdirhier(pkgdir) + os.chdir(pkgdir) + logger.debug(1, "Running %s", cvscmd) + bb.fetch2.check_network_access(d, cvscmd, ud.url) + cmd = cvscmd + + runfetchcmd(cmd, d, cleanup = [moddir]) + + if not os.access(moddir, os.R_OK): + raise FetchError("Directory %s was not readable despite sucessful fetch?!" 
% moddir, ud.url) + + scmdata = ud.parm.get("scmdata", "") + if scmdata == "keep": + tar_flags = "" + else: + tar_flags = "--exclude 'CVS'" + + # tar them up to a defined filename + if 'fullpath' in ud.parm: + os.chdir(pkgdir) + cmd = "tar %s -czf %s %s" % (tar_flags, ud.localpath, localdir) + else: + os.chdir(moddir) + os.chdir('..') + cmd = "tar %s -czf %s %s" % (tar_flags, ud.localpath, os.path.basename(moddir)) + + runfetchcmd(cmd, d, cleanup = [ud.localpath]) + + def clean(self, ud, d): + """ Clean CVS Files and tarballs """ + + pkg = d.getVar('PN', True) + pkgdir = os.path.join(d.getVar("CVSDIR", True), pkg) + + bb.utils.remove(pkgdir, True) + bb.utils.remove(ud.localpath) + diff --git a/bitbake/lib/bb/fetch2/git.py b/bitbake/lib/bb/fetch2/git.py new file mode 100644 index 0000000000..9ca24428a1 --- /dev/null +++ b/bitbake/lib/bb/fetch2/git.py @@ -0,0 +1,355 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +""" +BitBake 'Fetch' git implementation + +git fetcher support the SRC_URI with format of: +SRC_URI = "git://some.host/somepath;OptionA=xxx;OptionB=xxx;..." + +Supported SRC_URI options are: + +- branch + The git branch to retrieve from. The default is "master" + + This option also supports multiple branch fetching, with branches + separated by commas. In multiple branches case, the name option + must have the same number of names to match the branches, which is + used to specify the SRC_REV for the branch + e.g: + SRC_URI="git://some.host/somepath;branch=branchX,branchY;name=nameX,nameY" + SRCREV_nameX = "xxxxxxxxxxxxxxxxxxxx" + SRCREV_nameY = "YYYYYYYYYYYYYYYYYYYY" + +- tag + The git tag to retrieve. The default is "master" + +- protocol + The method to use to access the repository. Common options are "git", + "http", "https", "file", "ssh" and "rsync". The default is "git". 
+ +- rebaseable + rebaseable indicates that the upstream git repo may rebase in the future, + and current revision may disappear from upstream repo. This option will + remind fetcher to preserve local cache carefully for future use. + The default value is "0", set rebaseable=1 for rebaseable git repo. + +- nocheckout + Don't checkout source code when unpacking. set this option for the recipe + who has its own routine to checkout code. + The default is "0", set nocheckout=1 if needed. + +- bareclone + Create a bare clone of the source code and don't checkout the source code + when unpacking. Set this option for the recipe who has its own routine to + checkout code and tracking branch requirements. + The default is "0", set bareclone=1 if needed. + +- nobranch + Don't check the SHA validation for branch. set this option for the recipe + referring to commit which is valid in tag instead of branch. + The default is "0", set nobranch=1 if needed. + +""" + +#Copyright (C) 2005 Richard Purdie +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
import os
import bb
from bb import data
from bb.fetch2 import FetchMethod
from bb.fetch2 import runfetchcmd
from bb.fetch2 import logger

class Git(FetchMethod):
    """Class to fetch a module or modules from git repositories.

    A bare mirror clone is kept under ${GITDIR} (default ${DL_DIR}/git2) and,
    when mirror tarballs are enabled, also tarred up so it can be served from
    a premirror.
    """
    def init(self, d):
        pass

    def supports(self, ud, d):
        """
        Check to see if a given url can be fetched with git.
        """
        return ud.type in ['git']

    def supports_checksum(self, urldata):
        # git urls are pinned by revision, not by md5/sha256 file checksums
        return False

    def urldata_init(self, ud, d):
        """
        init git specific variables within url data
        so that the git method like latest_revision() can work
        """
        if 'protocol' in ud.parm:
            ud.proto = ud.parm['protocol']
        elif not ud.host:
            ud.proto = 'file'
        else:
            ud.proto = "git"

        if not ud.proto in ('git', 'file', 'ssh', 'http', 'https', 'rsync'):
            raise bb.fetch2.ParameterError("Invalid protocol type", ud.url)

        ud.nocheckout = ud.parm.get("nocheckout", "0") == "1"

        ud.rebaseable = ud.parm.get("rebaseable", "0") == "1"

        ud.nobranch = ud.parm.get("nobranch", "0") == "1"

        # bareclone implies nocheckout
        ud.bareclone = ud.parm.get("bareclone", "0") == "1"
        if ud.bareclone:
            ud.nocheckout = 1

        ud.unresolvedrev = {}
        branches = ud.parm.get("branch", "master").split(',')
        if len(branches) != len(ud.names):
            raise bb.fetch2.ParameterError("The number of name and branch parameters is not balanced", ud.url)
        ud.branches = {}
        for name in ud.names:
            branch = branches[ud.names.index(name)]
            ud.branches[name] = branch
            ud.unresolvedrev[name] = branch

        ud.basecmd = data.getVar("FETCHCMD_git", d, True) or "git"

        ud.write_tarballs = ((data.getVar("BB_GENERATE_MIRROR_TARBALLS", d, True) or "0") != "0") or ud.rebaseable

        ud.setup_revisons(d)

        for name in ud.names:
            # Ensure anything that doesn't look like a sha1 revision (40 hex
            # chars) is resolved into one via ls-remote
            if not ud.revisions[name] or len(ud.revisions[name]) != 40 or (False in [c in "abcdef0123456789" for c in ud.revisions[name]]):
                if ud.revisions[name]:
                    ud.unresolvedrev[name] = ud.revisions[name]
                ud.revisions[name] = self.latest_revision(ud, d, name)

        gitsrcname = '%s%s' % (ud.host.replace(':', '.'), ud.path.replace('/', '.').replace('*', '.'))
        # for rebaseable git repo, it is necessary to keep mirror tar ball
        # per revision, so that even the revision disappears from the
        # upstream repo in the future, the mirror will remain intact and still
        # contains the revision
        if ud.rebaseable:
            for name in ud.names:
                gitsrcname = gitsrcname + '_' + ud.revisions[name]
        ud.mirrortarball = 'git2_%s.tar.gz' % (gitsrcname)
        ud.fullmirror = os.path.join(d.getVar("DL_DIR", True), ud.mirrortarball)
        gitdir = d.getVar("GITDIR", True) or (d.getVar("DL_DIR", True) + "/git2/")
        ud.clonedir = os.path.join(gitdir, gitsrcname)

        ud.localfile = ud.clonedir

    def localpath(self, ud, d):
        return ud.clonedir

    def need_update(self, ud, d):
        """Return True if the local mirror clone is missing or incomplete."""
        if not os.path.exists(ud.clonedir):
            return True
        os.chdir(ud.clonedir)
        for name in ud.names:
            if not self._contains_ref(ud, d, name):
                return True
        if ud.write_tarballs and not os.path.exists(ud.fullmirror):
            return True
        return False

    def try_premirror(self, ud, d):
        # If we don't do this, updating an existing checkout with only premirrors
        # is not possible
        if d.getVar("BB_FETCH_PREMIRRORONLY", True) is not None:
            return True
        if os.path.exists(ud.clonedir):
            return False
        return True

    def download(self, ud, d):
        """Fetch url"""

        if ud.user:
            username = ud.user + '@'
        else:
            username = ""

        ud.repochanged = not os.path.exists(ud.fullmirror)

        # If the checkout doesn't exist and the mirror tarball does, extract it
        if not os.path.exists(ud.clonedir) and os.path.exists(ud.fullmirror):
            bb.utils.mkdirhier(ud.clonedir)
            os.chdir(ud.clonedir)
            runfetchcmd("tar -xzf %s" % (ud.fullmirror), d)

        repourl = "%s://%s%s%s" % (ud.proto, username, ud.host, ud.path)

        # If the repo still doesn't exist, fallback to cloning it
        if not os.path.exists(ud.clonedir):
            # We do this since git will use a "-l" option automatically for local urls where possible
            if repourl.startswith("file://"):
                repourl = repourl[7:]
            clone_cmd = "%s clone --bare --mirror %s %s" % (ud.basecmd, repourl, ud.clonedir)
            if ud.proto.lower() != 'file':
                bb.fetch2.check_network_access(d, clone_cmd)
            runfetchcmd(clone_cmd, d)

        os.chdir(ud.clonedir)
        # Update the checkout if needed
        needupdate = False
        for name in ud.names:
            if not self._contains_ref(ud, d, name):
                needupdate = True
        if needupdate:
            try:
                runfetchcmd("%s remote rm origin" % ud.basecmd, d)
            except bb.fetch2.FetchError:
                logger.debug(1, "No Origin")

            runfetchcmd("%s remote add --mirror=fetch origin %s" % (ud.basecmd, repourl), d)
            fetch_cmd = "%s fetch -f --prune %s refs/*:refs/*" % (ud.basecmd, repourl)
            if ud.proto.lower() != 'file':
                bb.fetch2.check_network_access(d, fetch_cmd, ud.url)
            runfetchcmd(fetch_cmd, d)
            runfetchcmd("%s prune-packed" % ud.basecmd, d)
            runfetchcmd("%s pack-redundant --all | xargs -r rm" % ud.basecmd, d)
            ud.repochanged = True
        os.chdir(ud.clonedir)
        for name in ud.names:
            if not self._contains_ref(ud, d, name):
                raise bb.fetch2.FetchError("Unable to find revision %s in branch %s even from upstream" % (ud.revisions[name], ud.branches[name]))

    def build_mirror_data(self, ud, d):
        # Generate a mirror tarball if needed
        if ud.write_tarballs and (ud.repochanged or not os.path.exists(ud.fullmirror)):
            # it's possible that this symlink points to read-only filesystem with PREMIRROR
            if os.path.islink(ud.fullmirror):
                os.unlink(ud.fullmirror)

            os.chdir(ud.clonedir)
            logger.info("Creating tarball of git repository")
            runfetchcmd("tar -czf %s %s" % (ud.fullmirror, os.path.join(".")), d)
            runfetchcmd("touch %s.done" % (ud.fullmirror), d)

    def unpack(self, ud, destdir, d):
        """ unpack the downloaded src to destdir"""

        subdir = ud.parm.get("subpath", "")
        if subdir != "":
            readpathspec = ":%s" % (subdir)
            def_destsuffix = "%s/" % os.path.basename(subdir)
        else:
            readpathspec = ""
            def_destsuffix = "git/"

        destsuffix = ud.parm.get("destsuffix", def_destsuffix)
        destdir = ud.destdir = os.path.join(destdir, destsuffix)
        if os.path.exists(destdir):
            bb.utils.prunedir(destdir)

        cloneflags = "-s -n"
        if ud.bareclone:
            cloneflags += " --mirror"

        # Versions of git prior to 1.7.9.2 have issues where foo.git and foo get confused
        # and you end up with some horrible union of the two when you attempt to clone it
        # The least invasive workaround seems to be a symlink to the real directory to
        # fool git into ignoring any .git version that may also be present.
        #
        # The issue is fixed in more recent versions of git so we can drop this hack in future
        # when that version becomes common enough.
        clonedir = ud.clonedir
        if not ud.path.endswith(".git"):
            indirectiondir = destdir[:-1] + ".indirectionsymlink"
            if os.path.exists(indirectiondir):
                os.remove(indirectiondir)
            bb.utils.mkdirhier(os.path.dirname(indirectiondir))
            os.symlink(ud.clonedir, indirectiondir)
            clonedir = indirectiondir

        runfetchcmd("git clone %s %s/ %s" % (cloneflags, clonedir, destdir), d)
        if not ud.nocheckout:
            os.chdir(destdir)
            if subdir != "":
                runfetchcmd("%s read-tree %s%s" % (ud.basecmd, ud.revisions[ud.names[0]], readpathspec), d)
                runfetchcmd("%s checkout-index -q -f -a" % ud.basecmd, d)
            else:
                runfetchcmd("%s checkout %s" % (ud.basecmd, ud.revisions[ud.names[0]]), d)
        return True

    def clean(self, ud, d):
        """ clean the git directory """

        bb.utils.remove(ud.localpath, True)
        bb.utils.remove(ud.fullmirror)
        bb.utils.remove(ud.fullmirror + ".done")

    def supports_srcrev(self):
        return True

    def _contains_ref(self, ud, d, name):
        """Return True if the clone already has the wanted revision (on the
        configured branch unless nobranch is set)."""
        cmd = ""
        if ud.nobranch:
            cmd = "%s log --pretty=oneline -n 1 %s -- 2> /dev/null | wc -l" % (
                ud.basecmd, ud.revisions[name])
        else:
            cmd = "%s branch --contains %s --list %s 2> /dev/null | wc -l" % (
                ud.basecmd, ud.revisions[name], ud.branches[name])
        try:
            output = runfetchcmd(cmd, d, quiet=True)
        except bb.fetch2.FetchError:
            return False
        if len(output.split()) > 1:
            raise bb.fetch2.FetchError("The command '%s' gave output with more then 1 line unexpectedly, output: '%s'" % (cmd, output))
        return output.split()[0] != "0"

    def _revision_key(self, ud, d, name):
        """
        Return a unique key for the url
        """
        return "git:" + ud.host + ud.path.replace('/', '.') + ud.unresolvedrev[name]

    def _lsremote(self, ud, d, search):
        """
        Run git ls-remote with the specified search string
        """
        if ud.user:
            username = ud.user + '@'
        else:
            username = ""

        cmd = "%s ls-remote %s://%s%s%s %s" % \
              (ud.basecmd, ud.proto, username, ud.host, ud.path, search)
        if ud.proto.lower() != 'file':
            bb.fetch2.check_network_access(d, cmd)
        output = runfetchcmd(cmd, d, True)
        if not output:
            raise bb.fetch2.FetchError("The command %s gave empty output unexpectedly" % cmd, ud.url)
        return output

    def _latest_revision(self, ud, d, name):
        """
        Compute the HEAD revision for the url
        """
        search = "refs/heads/%s refs/tags/%s^{}" % (ud.unresolvedrev[name], ud.unresolvedrev[name])
        output = self._lsremote(ud, d, search)
        return output.split()[0]

    def _build_revision(self, ud, d, name):
        return ud.revisions[name]

    def checkstatus(self, ud, d):
        """Return True if the remote repository is reachable."""
        fetchcmd = "%s ls-remote %s" % (ud.basecmd, ud.url)
        try:
            runfetchcmd(fetchcmd, d, quiet=True)
            return True
        except bb.fetch2.FetchError:
            # FIX: this was `except FetchError:`, but FetchError is never
            # imported in this module, so a failed check raised NameError
            # instead of returning False.
            return False
import os
import bb
from bb import data
from bb.fetch2.git import Git
from bb.fetch2 import runfetchcmd
from bb.fetch2 import logger

class GitANNEX(Git):
    """Git fetcher variant that also syncs git-annex managed content."""

    def supports(self, ud, d):
        """
        Check to see if a given url can be fetched with git-annex.
        """
        return ud.type in ['gitannex']

    def uses_annex(self, ud, d):
        """Return True if the clone contains a git-annex branch."""
        for name in ud.names:
            try:
                runfetchcmd("%s rev-list git-annex" % (ud.basecmd), d, quiet=True)
                return True
            except bb.fetch2.FetchError:
                # FIX: was bb.fetch.FetchError; the legacy bb.fetch module is
                # not present, so the except clause itself raised AttributeError.
                pass

        return False

    def update_annex(self, ud, d):
        """Fetch all annexed content into the clone; returns False on failure."""
        try:
            runfetchcmd("%s annex get --all" % (ud.basecmd), d, quiet=True)
        except bb.fetch2.FetchError:
            return False
        runfetchcmd("chmod u+w -R %s/annex" % (ud.clonedir), d, quiet=True)

        return True

    def download(self, ud, d):
        Git.download(self, ud, d)

        os.chdir(ud.clonedir)
        annex = self.uses_annex(ud, d)
        if annex:
            self.update_annex(ud, d)

    def unpack(self, ud, destdir, d):
        Git.unpack(self, ud, destdir, d)

        os.chdir(ud.destdir)
        # Best-effort sync; annex repos without a remote configured will fail here
        try:
            runfetchcmd("%s annex sync" % (ud.basecmd), d)
        except bb.fetch2.FetchError:
            pass

        annex = self.uses_annex(ud, d)
        if annex:
            runfetchcmd("%s annex get" % (ud.basecmd), d)
            runfetchcmd("chmod u+w -R %s/.git/annex" % (ud.destdir), d, quiet=True)
import os
import bb
from bb import data
from bb.fetch2.git import Git
from bb.fetch2 import runfetchcmd
from bb.fetch2 import logger

class GitSM(Git):
    """Git fetcher variant that also initializes and updates submodules."""

    def supports(self, ud, d):
        """
        Check to see if a given url can be fetched with git and submodules.
        """
        return ud.type in ['gitsm']

    def uses_submodules(self, ud, d):
        """Return True if any fetched revision carries a .gitmodules file."""
        for name in ud.names:
            try:
                runfetchcmd("%s show %s:.gitmodules" % (ud.basecmd, ud.revisions[name]), d, quiet=True)
                return True
            except bb.fetch2.FetchError:
                # FIX: was bb.fetch.FetchError; the legacy bb.fetch module is
                # not present, so the except clause itself raised AttributeError.
                pass
        return False

    def _set_relative_paths(self, repopath):
        """
        Fix submodule paths to be relative instead of absolute,
        so that when we move the repo it doesn't break
        (In Git 1.7.10+ this is done automatically)
        """
        submodules = []
        with open(os.path.join(repopath, '.gitmodules'), 'r') as f:
            for line in f.readlines():
                if line.startswith('[submodule'):
                    submodules.append(line.split('"')[1])

        for module in submodules:
            # Rewrite the submodule's .git gitdir pointer to a relative path
            repo_conf = os.path.join(repopath, module, '.git')
            if os.path.exists(repo_conf):
                with open(repo_conf, 'r') as f:
                    lines = f.readlines()
                newpath = ''
                for i, line in enumerate(lines):
                    if line.startswith('gitdir:'):
                        oldpath = line.split(': ')[-1].rstrip()
                        if oldpath.startswith('/'):
                            newpath = '../' * (module.count('/') + 1) + '.git/modules/' + module
                            lines[i] = 'gitdir: %s\n' % newpath
                            break
                if newpath:
                    with open(repo_conf, 'w') as f:
                        for line in lines:
                            f.write(line)

            # Rewrite the worktree entry in .git/modules/<module>/config likewise
            repo_conf2 = os.path.join(repopath, '.git', 'modules', module, 'config')
            if os.path.exists(repo_conf2):
                with open(repo_conf2, 'r') as f:
                    lines = f.readlines()
                newpath = ''
                for i, line in enumerate(lines):
                    if line.lstrip().startswith('worktree = '):
                        oldpath = line.split(' = ')[-1].rstrip()
                        if oldpath.startswith('/'):
                            newpath = '../' * (module.count('/') + 3) + module
                            lines[i] = '\tworktree = %s\n' % newpath
                            break
                if newpath:
                    with open(repo_conf2, 'w') as f:
                        for line in lines:
                            f.write(line)

    def update_submodules(self, ud, d):
        # We have to convert bare -> full repo, do the submodule bit, then convert back
        tmpclonedir = ud.clonedir + ".tmp"
        gitdir = tmpclonedir + os.sep + ".git"
        bb.utils.remove(tmpclonedir, True)
        os.mkdir(tmpclonedir)
        os.rename(ud.clonedir, gitdir)
        runfetchcmd("sed " + gitdir + "/config -i -e 's/bare.*=.*true/bare = false/'", d)
        os.chdir(tmpclonedir)
        runfetchcmd(ud.basecmd + " reset --hard", d)
        runfetchcmd(ud.basecmd + " submodule init", d)
        runfetchcmd(ud.basecmd + " submodule update", d)
        self._set_relative_paths(tmpclonedir)
        runfetchcmd("sed " + gitdir + "/config -i -e 's/bare.*=.*false/bare = true/'", d)
        os.rename(gitdir, ud.clonedir)
        bb.utils.remove(tmpclonedir, True)

    def download(self, ud, d):
        Git.download(self, ud, d)

        os.chdir(ud.clonedir)
        submodules = self.uses_submodules(ud, d)
        if submodules:
            self.update_submodules(ud, d)

    def unpack(self, ud, destdir, d):
        Git.unpack(self, ud, destdir, d)

        os.chdir(ud.destdir)
        submodules = self.uses_submodules(ud, d)
        if submodules:
            runfetchcmd("cp -r " + ud.clonedir + "/modules " + ud.destdir + "/.git/", d)
            runfetchcmd(ud.basecmd + " submodule init", d)
            runfetchcmd(ud.basecmd + " submodule update", d)
import os
import sys
import logging
import bb
from bb import data
from bb.fetch2 import FetchMethod
from bb.fetch2 import FetchError
from bb.fetch2 import MissingParameterError
from bb.fetch2 import runfetchcmd
from bb.fetch2 import logger

class Hg(FetchMethod):
    """Class to fetch from mercurial repositories"""
    def supports(self, ud, d):
        """
        Check to see if a given url can be fetched with mercurial.
        """
        return ud.type in ['hg']

    def urldata_init(self, ud, d):
        """
        init hg specific variables within url data
        """
        if not "module" in ud.parm:
            raise MissingParameterError('module', ud.url)

        ud.module = ud.parm["module"]

        # Create paths to mercurial checkouts
        relpath = self._strip_leading_slashes(ud.path)
        ud.pkgdir = os.path.join(data.expand('${HGDIR}', d), ud.host, relpath)
        ud.moddir = os.path.join(ud.pkgdir, ud.module)

        ud.setup_revisons(d)

        if 'rev' in ud.parm:
            ud.revision = ud.parm['rev']
        elif not ud.revision:
            ud.revision = self.latest_revision(ud, d)

        ud.localfile = data.expand('%s_%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.path.replace('/', '.'), ud.revision), d)

    def need_update(self, ud, d):
        # "tip" is a moving target, so always refetch it
        revTag = ud.parm.get('rev', 'tip')
        if revTag == "tip":
            return True
        if not os.path.exists(ud.localpath):
            return True
        return False

    def _buildhgcommand(self, ud, d, command):
        """
        Build up an hg commandline based on ud
        command is "fetch", "update", "info"
        """

        basecmd = data.expand('${FETCHCMD_hg}', d)

        proto = ud.parm.get('protocol', 'http')

        host = ud.host
        if proto == "file":
            host = "/"
            ud.host = "localhost"

        if not ud.user:
            hgroot = host + ud.path
        else:
            if ud.pswd:
                hgroot = ud.user + ":" + ud.pswd + "@" + host + ud.path
            else:
                hgroot = ud.user + "@" + host + ud.path

        if command == "info":
            return "%s identify -i %s://%s/%s" % (basecmd, proto, hgroot, ud.module)

        options = []

        # Don't specify revision for the fetch; clone the entire repo.
        # This avoids an issue if the specified revision is a tag, because
        # the tag actually exists in the specified revision + 1, so it won't
        # be available when used in any successive commands.
        if ud.revision and command != "fetch":
            options.append("-r %s" % ud.revision)

        if command == "fetch":
            cmd = "%s clone %s %s://%s/%s %s" % (basecmd, " ".join(options), proto, hgroot, ud.module, ud.module)
        elif command == "pull":
            # do not pass options list; limiting pull to rev causes the local
            # repo not to contain it and immediately following "update" command
            # will crash
            if ud.user and ud.pswd:
                cmd = "%s --config auth.default.prefix=* --config auth.default.username=%s --config auth.default.password=%s --config \"auth.default.schemes=%s\" pull" % (basecmd, ud.user, ud.pswd, proto)
            else:
                cmd = "%s pull" % (basecmd)
        elif command == "update":
            cmd = "%s update -C %s" % (basecmd, " ".join(options))
        else:
            raise FetchError("Invalid hg command %s" % command, ud.url)

        return cmd

    def download(self, ud, d):
        """Fetch url"""

        logger.debug(2, "Fetch: checking for module directory '" + ud.moddir + "'")

        if os.access(os.path.join(ud.moddir, '.hg'), os.R_OK):
            updatecmd = self._buildhgcommand(ud, d, "pull")
            logger.info("Update " + ud.url)
            # update sources there
            os.chdir(ud.moddir)
            logger.debug(1, "Running %s", updatecmd)
            bb.fetch2.check_network_access(d, updatecmd, ud.url)
            runfetchcmd(updatecmd, d)

        else:
            fetchcmd = self._buildhgcommand(ud, d, "fetch")
            logger.info("Fetch " + ud.url)
            # check out sources there
            bb.utils.mkdirhier(ud.pkgdir)
            os.chdir(ud.pkgdir)
            logger.debug(1, "Running %s", fetchcmd)
            bb.fetch2.check_network_access(d, fetchcmd, ud.url)
            runfetchcmd(fetchcmd, d)

        # Even when we clone (fetch), we still need to update as hg's clone
        # won't checkout the specified revision if its on a branch
        updatecmd = self._buildhgcommand(ud, d, "update")
        os.chdir(ud.moddir)
        logger.debug(1, "Running %s", updatecmd)
        runfetchcmd(updatecmd, d)

        scmdata = ud.parm.get("scmdata", "")
        if scmdata == "keep":
            tar_flags = ""
        else:
            # FIX: the second exclude was misspelled '.hgrags', which matched
            # nothing; '.hgtags' is the hg tag metadata file, parallel to the
            # git fetcher's --exclude '.git'.
            tar_flags = "--exclude '.hg' --exclude '.hgtags'"

        os.chdir(ud.pkgdir)
        runfetchcmd("tar %s -czf %s %s" % (tar_flags, ud.localpath, ud.module), d, cleanup = [ud.localpath])

    def supports_srcrev(self):
        return True

    def _latest_revision(self, ud, d, name):
        """
        Compute tip revision for the url
        """
        bb.fetch2.check_network_access(d, self._buildhgcommand(ud, d, "info"))
        output = runfetchcmd(self._buildhgcommand(ud, d, "info"), d)
        return output.strip()

    def _build_revision(self, ud, d, name):
        return ud.revision

    def _revision_key(self, ud, d, name):
        """
        Return a unique key for the url
        """
        return "hg:" + ud.moddir
import os
import urllib
import bb
import bb.utils
from bb import data
from bb.fetch2 import FetchMethod, FetchError
from bb.fetch2 import logger

class Local(FetchMethod):
    """Fetcher for file:// urls; the file already exists locally, so this
    mostly resolves paths against FILESPATH/FILESDIR."""

    def supports(self, urldata, d):
        """
        Check to see if a given url represents a local fetch.
        """
        return urldata.type in ['file']

    def urldata_init(self, ud, d):
        # We don't set localfile as for this fetcher the file is already local!
        ud.decodedurl = urllib.unquote(ud.url.split("://")[1].split(";")[0])
        ud.basename = os.path.basename(ud.decodedurl)
        ud.basepath = ud.decodedurl
        return

    def localpath(self, urldata, d):
        """
        Return the local filename of a given url assuming a successful fetch.
        """
        path = urldata.decodedurl
        newpath = path
        if path[0] != "/":
            filespath = data.getVar('FILESPATH', d, True)
            if filespath:
                logger.debug(2, "Searching for %s in paths: \n%s" % (path, "\n    ".join(filespath.split(":"))))
                newpath = bb.utils.which(filespath, path)
            if not newpath:
                filesdir = data.getVar('FILESDIR', d, True)
                if filesdir:
                    logger.debug(2, "Searching for %s in path: %s" % (path, filesdir))
                    newpath = os.path.join(filesdir, path)
            if (not newpath or not os.path.exists(newpath)) and path.find("*") != -1:
                # For expressions using '*', best we can do is take the first directory in FILESPATH that exists
                # FIX: guard against FILESPATH being unset; bb.utils.which()
                # cannot take None as its path argument.
                newpath = bb.utils.which(filespath, ".") if filespath else ""
                logger.debug(2, "Searching for %s in path: %s" % (path, newpath))
                return newpath
            if not os.path.exists(newpath):
                dldirfile = os.path.join(d.getVar("DL_DIR", True), path)
                logger.debug(2, "Defaulting to %s for %s" % (dldirfile, path))
                bb.utils.mkdirhier(os.path.dirname(dldirfile))
                return dldirfile
        return newpath

    def need_update(self, ud, d):
        # Glob urls cannot be checked for staleness; existing files need no fetch
        if ud.url.find("*") != -1:
            return False
        if os.path.exists(ud.localpath):
            return False
        return True

    def download(self, urldata, d):
        """Fetch urls (no-op for Local method)"""
        # no need to fetch local files, we'll deal with them in place.
        if self.supports_checksum(urldata) and not os.path.exists(urldata.localpath):
            locations = []
            filespath = data.getVar('FILESPATH', d, True)
            if filespath:
                locations = filespath.split(":")
            filesdir = data.getVar('FILESDIR', d, True)
            if filesdir:
                locations.append(filesdir)
            locations.append(d.getVar("DL_DIR", True))

            msg = "Unable to find file " + urldata.url + " anywhere. The paths that were searched were:\n    " + "\n    ".join(locations)
            raise FetchError(msg)

        return True

    def checkstatus(self, urldata, d):
        """
        Check the status of the url
        """
        if urldata.localpath.find("*") != -1:
            logger.info("URL %s looks like a glob and was therefore not checked.", urldata.url)
            return True
        if os.path.exists(urldata.localpath):
            return True
        return False

    def clean(self, urldata, d):
        # Nothing to clean; we never copied the file anywhere
        return
+ """ + return ud.type in ['osc'] + + def urldata_init(self, ud, d): + if not "module" in ud.parm: + raise MissingParameterError('module', ud.url) + + ud.module = ud.parm["module"] + + # Create paths to osc checkouts + relpath = self._strip_leading_slashes(ud.path) + ud.pkgdir = os.path.join(data.expand('${OSCDIR}', d), ud.host) + ud.moddir = os.path.join(ud.pkgdir, relpath, ud.module) + + if 'rev' in ud.parm: + ud.revision = ud.parm['rev'] + else: + pv = data.getVar("PV", d, 0) + rev = bb.fetch2.srcrev_internal_helper(ud, d) + if rev and rev != True: + ud.revision = rev + else: + ud.revision = "" + + ud.localfile = data.expand('%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.path.replace('/', '.'), ud.revision), d) + + def _buildosccommand(self, ud, d, command): + """ + Build up an ocs commandline based on ud + command is "fetch", "update", "info" + """ + + basecmd = data.expand('${FETCHCMD_osc}', d) + + proto = ud.parm.get('protocol', 'ocs') + + options = [] + + config = "-c %s" % self.generate_config(ud, d) + + if ud.revision: + options.append("-r %s" % ud.revision) + + coroot = self._strip_leading_slashes(ud.path) + + if command == "fetch": + osccmd = "%s %s co %s/%s %s" % (basecmd, config, coroot, ud.module, " ".join(options)) + elif command == "update": + osccmd = "%s %s up %s" % (basecmd, config, " ".join(options)) + else: + raise FetchError("Invalid osc command %s" % command, ud.url) + + return osccmd + + def download(self, ud, d): + """ + Fetch url + """ + + logger.debug(2, "Fetch: checking for module directory '" + ud.moddir + "'") + + if os.access(os.path.join(data.expand('${OSCDIR}', d), ud.path, ud.module), os.R_OK): + oscupdatecmd = self._buildosccommand(ud, d, "update") + logger.info("Update "+ ud.url) + # update sources there + os.chdir(ud.moddir) + logger.debug(1, "Running %s", oscupdatecmd) + bb.fetch2.check_network_access(d, oscupdatecmd, ud.url) + runfetchcmd(oscupdatecmd, d) + else: + oscfetchcmd = self._buildosccommand(ud, d, "fetch") + 
logger.info("Fetch " + ud.url) + # check out sources there + bb.utils.mkdirhier(ud.pkgdir) + os.chdir(ud.pkgdir) + logger.debug(1, "Running %s", oscfetchcmd) + bb.fetch2.check_network_access(d, oscfetchcmd, ud.url) + runfetchcmd(oscfetchcmd, d) + + os.chdir(os.path.join(ud.pkgdir + ud.path)) + # tar them up to a defined filename + runfetchcmd("tar -czf %s %s" % (ud.localpath, ud.module), d, cleanup = [ud.localpath]) + + def supports_srcrev(self): + return False + + def generate_config(self, ud, d): + """ + Generate a .oscrc to be used for this run. + """ + + config_path = os.path.join(data.expand('${OSCDIR}', d), "oscrc") + if (os.path.exists(config_path)): + os.remove(config_path) + + f = open(config_path, 'w') + f.write("[general]\n") + f.write("apisrv = %s\n" % ud.host) + f.write("scheme = http\n") + f.write("su-wrapper = su -c\n") + f.write("build-root = %s\n" % data.expand('${WORKDIR}', d)) + f.write("urllist = http://moblin-obs.jf.intel.com:8888/build/%(project)s/%(repository)s/%(buildarch)s/:full/%(name)s.rpm\n") + f.write("extra-pkgs = gzip\n") + f.write("\n") + f.write("[%s]\n" % ud.host) + f.write("user = %s\n" % ud.parm["user"]) + f.write("pass = %s\n" % ud.parm["pswd"]) + f.close() + + return config_path diff --git a/bitbake/lib/bb/fetch2/perforce.py b/bitbake/lib/bb/fetch2/perforce.py new file mode 100644 index 0000000000..9329d72779 --- /dev/null +++ b/bitbake/lib/bb/fetch2/perforce.py @@ -0,0 +1,194 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +""" +BitBake 'Fetch' implementations + +Classes for obtaining upstream sources for the +BitBake build tools. + +""" + +# Copyright (C) 2003, 2004 Chris Larson +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. 
from future_builtins import zip
import os
import subprocess
import logging
import bb
from bb import data
from bb.fetch2 import FetchMethod
from bb.fetch2 import FetchError
from bb.fetch2 import logger
from bb.fetch2 import runfetchcmd

class Perforce(FetchMethod):
    """Fetcher for p4:// urls; checks files out of a Perforce depot at a
    changeset and tars them up under DL_DIR."""

    def supports(self, ud, d):
        return ud.type in ['p4']

    def doparse(url, d):
        """Split a p4 url into (host, path, user, pswd, parm).

        Falls back to ${P4PORT} when no user@host is embedded in the url,
        and resolves the changeset via getcset().
        """
        parm = {}
        path = url.split("://")[1]
        delim = path.find("@")
        if delim != -1:
            (user, pswd, host, port) = path.split('@')[0].split(":")
            path = path.split('@')[1]
        else:
            (host, port) = data.getVar('P4PORT', d).split(':')
            user = ""
            pswd = ""

        if path.find(";") != -1:
            keys = []
            values = []
            plist = path.split(';')
            for item in plist:
                if item.count('='):
                    (key, value) = item.split('=')
                    keys.append(key)
                    values.append(value)

            parm = dict(zip(keys, values))
        path = "//" + path.split(';')[0]
        host += ":%s" % (port)
        parm["cset"] = Perforce.getcset(d, path, host, user, pswd, parm)

        return host, path, user, pswd, parm
    doparse = staticmethod(doparse)

    def getcset(d, depot, host, user, pswd, parm):
        """Return the most recent changeset number for *depot* (or -1)."""
        p4opt = ""
        if "cset" in parm:
            return parm["cset"]
        if user:
            p4opt += " -u %s" % (user)
        if pswd:
            p4opt += " -P %s" % (pswd)
        if host:
            p4opt += " -p %s" % (host)

        p4date = data.getVar("P4DATE", d, True)
        if "revision" in parm:
            depot += "#%s" % (parm["revision"])
        elif "label" in parm:
            depot += "@%s" % (parm["label"])
        elif p4date:
            depot += "@%s" % (p4date)

        p4cmd = data.getVar('FETCHCMD_p4', d, True)
        logger.debug(1, "Running %s%s changes -m 1 %s", p4cmd, p4opt, depot)
        p4file, errors = bb.process.run("%s%s changes -m 1 %s" % (p4cmd, p4opt, depot))
        cset = p4file.strip()
        logger.debug(1, "READ %s", cset)
        if not cset:
            return -1

        # "Change 1234 on ..." -> 1234
        return cset.split(' ')[1]
    getcset = staticmethod(getcset)

    def urldata_init(self, ud, d):
        (host, path, user, pswd, parm) = Perforce.doparse(ud.url, d)

        # If a label is specified, we use that as our filename

        if "label" in parm:
            ud.localfile = "%s.tar.gz" % (parm["label"])
            return

        base = path
        which = path.find('/...')
        if which != -1:
            base = path[:which-1]

        base = self._strip_leading_slashes(base)

        cset = Perforce.getcset(d, path, host, user, pswd, parm)

        ud.localfile = data.expand('%s+%s+%s.tar.gz' % (host, base.replace('/', '.'), cset), d)

    def download(self, ud, d):
        """
        Fetch urls
        """

        (host, depot, user, pswd, parm) = Perforce.doparse(ud.url, d)

        if depot.find('/...') != -1:
            path = depot[:depot.find('/...')]
        else:
            path = depot

        module = parm.get('module', os.path.basename(path))

        # Get the p4 command
        p4opt = ""
        if user:
            p4opt += " -u %s" % (user)

        if pswd:
            p4opt += " -P %s" % (pswd)

        if host:
            p4opt += " -p %s" % (host)

        p4cmd = data.getVar('FETCHCMD_p4', d, True)

        # create temp directory
        logger.debug(2, "Fetch: creating temporary directory")
        bb.utils.mkdirhier(d.expand('${WORKDIR}'))
        mktemp = d.getVar("FETCHCMD_p4mktemp", True) or d.expand("mktemp -d -q '${WORKDIR}/oep4.XXXXXX'")
        tmpfile, errors = bb.process.run(mktemp)
        tmpfile = tmpfile.strip()
        if not tmpfile:
            raise FetchError("Fetch: unable to create temporary directory.. make sure 'mktemp' is in the PATH.", ud.url)

        if "label" in parm:
            depot = "%s@%s" % (depot, parm["label"])
        else:
            cset = Perforce.getcset(d, depot, host, user, pswd, parm)
            depot = "%s@%s" % (depot, cset)

        os.chdir(tmpfile)
        logger.info("Fetch " + ud.url)
        logger.info("%s%s files %s", p4cmd, p4opt, depot)
        p4filelist, errors = bb.process.run("%s%s files %s" % (p4cmd, p4opt, depot))
        p4filelist = [f.rstrip() for f in p4filelist.splitlines()]

        if not p4filelist:
            raise FetchError("Fetch: unable to get the P4 files from %s" % depot, ud.url)

        count = 0

        # Renamed from `file`/`list` to avoid shadowing builtins
        for p4entry in p4filelist:
            fields = p4entry.split()

            if fields[2] == "delete":
                continue

            dest = fields[0][len(path)+1:]
            where = dest.find("#")

            subprocess.call("%s%s print -o %s/%s %s" % (p4cmd, p4opt, module, dest[:where], fields[0]), shell=True)
            count = count + 1

        if count == 0:
            # FIX: logger.error() was called with no message, which raises
            # TypeError and masked the FetchError below.
            logger.error("Fetch: No files gathered from the P4 fetch")
            raise FetchError("Fetch: No files gathered from the P4 fetch", ud.url)

        runfetchcmd("tar -czf %s %s" % (ud.localpath, module), d, cleanup = [ud.localpath])
        # cleanup
        bb.utils.prunedir(tmpfile)
+# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +import os +import bb +from bb import data +from bb.fetch2 import FetchMethod +from bb.fetch2 import runfetchcmd + +class Repo(FetchMethod): + """Class to fetch a module or modules from repo (git) repositories""" + def supports(self, ud, d): + """ + Check to see if a given url can be fetched with repo. + """ + return ud.type in ["repo"] + + def urldata_init(self, ud, d): + """ + We don"t care about the git rev of the manifests repository, but + we do care about the manifest to use. The default is "default". + We also care about the branch or tag to be used. The default is + "master". + """ + + ud.proto = ud.parm.get('protocol', 'git') + ud.branch = ud.parm.get('branch', 'master') + ud.manifest = ud.parm.get('manifest', 'default.xml') + if not ud.manifest.endswith('.xml'): + ud.manifest += '.xml' + + ud.localfile = data.expand("repo_%s%s_%s_%s.tar.gz" % (ud.host, ud.path.replace("/", "."), ud.manifest, ud.branch), d) + + def download(self, ud, d): + """Fetch url""" + + if os.access(os.path.join(data.getVar("DL_DIR", d, True), ud.localfile), os.R_OK): + logger.debug(1, "%s already exists (or was stashed). 
Skipping repo init / sync.", ud.localpath) + return + + gitsrcname = "%s%s" % (ud.host, ud.path.replace("/", ".")) + repodir = data.getVar("REPODIR", d, True) or os.path.join(data.getVar("DL_DIR", d, True), "repo") + codir = os.path.join(repodir, gitsrcname, ud.manifest) + + if ud.user: + username = ud.user + "@" + else: + username = "" + + bb.utils.mkdirhier(os.path.join(codir, "repo")) + os.chdir(os.path.join(codir, "repo")) + if not os.path.exists(os.path.join(codir, "repo", ".repo")): + bb.fetch2.check_network_access(d, "repo init -m %s -b %s -u %s://%s%s%s" % (ud.manifest, ud.branch, ud.proto, username, ud.host, ud.path), ud.url) + runfetchcmd("repo init -m %s -b %s -u %s://%s%s%s" % (ud.manifest, ud.branch, ud.proto, username, ud.host, ud.path), d) + + bb.fetch2.check_network_access(d, "repo sync %s" % ud.url, ud.url) + runfetchcmd("repo sync", d) + os.chdir(codir) + + scmdata = ud.parm.get("scmdata", "") + if scmdata == "keep": + tar_flags = "" + else: + tar_flags = "--exclude '.repo' --exclude '.git'" + + # Create a cache + runfetchcmd("tar %s -czf %s %s" % (tar_flags, ud.localpath, os.path.join(".", "*") ), d) + + def supports_srcrev(self): + return False + + def _build_revision(self, ud, d): + return ud.manifest + + def _want_sortable_revision(self, ud, d): + return False diff --git a/bitbake/lib/bb/fetch2/sftp.py b/bitbake/lib/bb/fetch2/sftp.py new file mode 100644 index 0000000000..8ea4ef2ff3 --- /dev/null +++ b/bitbake/lib/bb/fetch2/sftp.py @@ -0,0 +1,129 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +""" +BitBake SFTP Fetch implementation + +Class for fetching files via SFTP. It tries to adhere to the (now +expired) IETF Internet Draft for "Uniform Resource Identifier (URI) +Scheme for Secure File Transfer Protocol (SFTP) and Secure Shell +(SSH)" (SECSH URI). + +It uses SFTP (as to adhere to the SECSH URI specification). It only +supports key based authentication, not password. 
This class, unlike +the SSH fetcher, does not support fetching a directory tree from the +remote. + + http://tools.ietf.org/html/draft-ietf-secsh-scp-sftp-ssh-uri-04 + https://www.iana.org/assignments/uri-schemes/prov/sftp + https://tools.ietf.org/html/draft-ietf-secsh-filexfer-13 + +Please note that '/' is used as host path seperator, and not ":" +as you may be used to from the scp/sftp commands. You can use a +~ (tilde) to specify a path relative to your home directory. +(The /~user/ syntax, for specyfing a path relative to another +user's home directory is not supported.) Note that the tilde must +still follow the host path seperator ("/"). See exampels below. + +Example SRC_URIs: + +SRC_URI = "sftp://host.example.com/dir/path.file.txt" + +A path relative to your home directory. + +SRC_URI = "sftp://host.example.com/~/dir/path.file.txt" + +You can also specify a username (specyfing password in the +URI is not supported, use SSH keys to authenticate): + +SRC_URI = "sftp://user@host.example.com/dir/path.file.txt" + +""" + +# Copyright (C) 2013, Olof Johansson +# +# Based in part on bb.fetch2.wget: +# Copyright (C) 2003, 2004 Chris Larson +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+# +# Based on functions from the base bb module, Copyright 2003 Holger Schurig + +import os +import bb +import urllib +import commands +from bb import data +from bb.fetch2 import URI +from bb.fetch2 import FetchMethod +from bb.fetch2 import runfetchcmd + + +class SFTP(FetchMethod): + """Class to fetch urls via 'sftp'""" + + def supports(self, ud, d): + """ + Check to see if a given url can be fetched with sftp. + """ + return ud.type in ['sftp'] + + def recommends_checksum(self, urldata): + return True + + def urldata_init(self, ud, d): + if 'protocol' in ud.parm and ud.parm['protocol'] == 'git': + raise bb.fetch2.ParameterError( + "Invalid protocol - if you wish to fetch from a " + + "git repository using ssh, you need to use the " + + "git:// prefix with protocol=ssh", ud.url) + + if 'downloadfilename' in ud.parm: + ud.basename = ud.parm['downloadfilename'] + else: + ud.basename = os.path.basename(ud.path) + + ud.localfile = data.expand(urllib.unquote(ud.basename), d) + + def download(self, ud, d): + """Fetch urls""" + + urlo = URI(ud.url) + basecmd = 'sftp -oPasswordAuthentication=no' + port = '' + if urlo.port: + port = '-P %d' % urlo.port + urlo.port = None + + dldir = data.getVar('DL_DIR', d, True) + lpath = os.path.join(dldir, ud.localfile) + + user = '' + if urlo.userinfo: + user = urlo.userinfo + '@' + + path = urlo.path + + # Supoprt URIs relative to the user's home directory, with + # the tilde syntax. (E.g. ). 
+ if path[:3] == '/~/': + path = path[3:] + + remote = '%s%s:%s' % (user, urlo.hostname, path) + + cmd = '%s %s %s %s' % (basecmd, port, commands.mkarg(remote), + commands.mkarg(lpath)) + + bb.fetch2.check_network_access(d, cmd, ud.url) + runfetchcmd(cmd, d) + return True diff --git a/bitbake/lib/bb/fetch2/ssh.py b/bitbake/lib/bb/fetch2/ssh.py new file mode 100644 index 0000000000..4ae979472c --- /dev/null +++ b/bitbake/lib/bb/fetch2/ssh.py @@ -0,0 +1,127 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +''' +BitBake 'Fetch' implementations + +This implementation is for Secure Shell (SSH), and attempts to comply with the +IETF secsh internet draft: + http://tools.ietf.org/wg/secsh/draft-ietf-secsh-scp-sftp-ssh-uri/ + + Currently does not support the sftp parameters, as this uses scp + Also does not support the 'fingerprint' connection parameter. + + Please note that '/' is used as host, path separator not ':' as you may + be used to, also '~' can be used to specify user HOME, but again after '/' + + Example SRC_URI: + SRC_URI = "ssh://user@host.example.com/dir/path/file.txt" + SRC_URI = "ssh://user@host.example.com/~/file.txt" +''' + +# Copyright (C) 2006 OpenedHand Ltd. +# +# +# Based in part on svk.py: +# Copyright (C) 2006 Holger Hans Peter Freyther +# Based on svn.py: +# Copyright (C) 2003, 2004 Chris Larson +# Based on functions from the base bb module: +# Copyright 2003 Holger Schurig +# +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +import re, os +from bb import data +from bb.fetch2 import FetchMethod +from bb.fetch2 import FetchError +from bb.fetch2 import logger +from bb.fetch2 import runfetchcmd + + +__pattern__ = re.compile(r''' + \s* # Skip leading whitespace + ssh:// # scheme + ( # Optional username/password block + (?P\S+) # username + (:(?P\S+))? # colon followed by the password (optional) + )? + (?P(;[^;]+)*)? # connection parameters block (optional) + @ + (?P\S+?) # non-greedy match of the host + (:(?P[0-9]+))? # colon followed by the port (optional) + / + (?P[^;]+) # path on the remote system, may be absolute or relative, + # and may include the use of '~' to reference the remote home + # directory + (?P(;[^;]+)*)? # parameters block (optional) + $ +''', re.VERBOSE) + +class SSH(FetchMethod): + '''Class to fetch a module or modules via Secure Shell''' + + def supports(self, urldata, d): + return __pattern__.match(urldata.url) != None + + def supports_checksum(self, urldata): + return False + + def urldata_init(self, urldata, d): + if 'protocol' in urldata.parm and urldata.parm['protocol'] == 'git': + raise bb.fetch2.ParameterError( + "Invalid protocol - if you wish to fetch from a git " + + "repository using ssh, you need to use " + + "git:// prefix with protocol=ssh", urldata.url) + m = __pattern__.match(urldata.url) + path = m.group('path') + host = m.group('host') + urldata.localpath = os.path.join(d.getVar('DL_DIR', True), os.path.basename(path)) + + def download(self, urldata, d): + dldir = d.getVar('DL_DIR', True) + + m = __pattern__.match(urldata.url) + path = m.group('path') + host = m.group('host') + port = m.group('port') + user = m.group('user') + password = m.group('pass') + + if port: + portarg = '-P %s' % port + else: + portarg = '' + + if user: + fr = 
user + if password: + fr += ':%s' % password + fr += '@%s' % host + else: + fr = host + fr += ':%s' % path + + + import commands + cmd = 'scp -B -r %s %s %s/' % ( + portarg, + commands.mkarg(fr), + commands.mkarg(dldir) + ) + + bb.fetch2.check_network_access(d, cmd, urldata.url) + + runfetchcmd(cmd, d) + diff --git a/bitbake/lib/bb/fetch2/svn.py b/bitbake/lib/bb/fetch2/svn.py new file mode 100644 index 0000000000..8847461913 --- /dev/null +++ b/bitbake/lib/bb/fetch2/svn.py @@ -0,0 +1,191 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +""" +BitBake 'Fetch' implementation for svn. + +""" + +# Copyright (C) 2003, 2004 Chris Larson +# Copyright (C) 2004 Marcin Juszkiewicz +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Based on functions from the base bb module, Copyright 2003 Holger Schurig + +import os +import sys +import logging +import bb +import re +from bb import data +from bb.fetch2 import FetchMethod +from bb.fetch2 import FetchError +from bb.fetch2 import MissingParameterError +from bb.fetch2 import runfetchcmd +from bb.fetch2 import logger + +class Svn(FetchMethod): + """Class to fetch a module or modules from svn repositories""" + def supports(self, ud, d): + """ + Check to see if a given url can be fetched with svn. 
+ """ + return ud.type in ['svn'] + + def urldata_init(self, ud, d): + """ + init svn specific variable within url data + """ + if not "module" in ud.parm: + raise MissingParameterError('module', ud.url) + + ud.basecmd = d.getVar('FETCHCMD_svn', True) + + ud.module = ud.parm["module"] + + # Create paths to svn checkouts + relpath = self._strip_leading_slashes(ud.path) + ud.pkgdir = os.path.join(data.expand('${SVNDIR}', d), ud.host, relpath) + ud.moddir = os.path.join(ud.pkgdir, ud.module) + + ud.setup_revisons(d) + + if 'rev' in ud.parm: + ud.revision = ud.parm['rev'] + + ud.localfile = data.expand('%s_%s_%s_%s_.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.path.replace('/', '.'), ud.revision), d) + + def _buildsvncommand(self, ud, d, command): + """ + Build up an svn commandline based on ud + command is "fetch", "update", "info" + """ + + proto = ud.parm.get('protocol', 'svn') + + svn_rsh = None + if proto == "svn+ssh" and "rsh" in ud.parm: + svn_rsh = ud.parm["rsh"] + + svnroot = ud.host + ud.path + + options = [] + + options.append("--no-auth-cache") + + if ud.user: + options.append("--username %s" % ud.user) + + if ud.pswd: + options.append("--password %s" % ud.pswd) + + if command == "info": + svncmd = "%s info %s %s://%s/%s/" % (ud.basecmd, " ".join(options), proto, svnroot, ud.module) + elif command == "log1": + svncmd = "%s log --limit 1 %s %s://%s/%s/" % (ud.basecmd, " ".join(options), proto, svnroot, ud.module) + else: + suffix = "" + if ud.revision: + options.append("-r %s" % ud.revision) + suffix = "@%s" % (ud.revision) + + if command == "fetch": + svncmd = "%s co %s %s://%s/%s%s %s" % (ud.basecmd, " ".join(options), proto, svnroot, ud.module, suffix, ud.module) + elif command == "update": + svncmd = "%s update %s" % (ud.basecmd, " ".join(options)) + else: + raise FetchError("Invalid svn command %s" % command, ud.url) + + if svn_rsh: + svncmd = "svn_RSH=\"%s\" %s" % (svn_rsh, svncmd) + + return svncmd + + def download(self, ud, d): + """Fetch 
url""" + + logger.debug(2, "Fetch: checking for module directory '" + ud.moddir + "'") + + if os.access(os.path.join(ud.moddir, '.svn'), os.R_OK): + svnupdatecmd = self._buildsvncommand(ud, d, "update") + logger.info("Update " + ud.url) + # update sources there + os.chdir(ud.moddir) + # We need to attempt to run svn upgrade first in case its an older working format + try: + runfetchcmd(ud.basecmd + " upgrade", d) + except FetchError: + pass + logger.debug(1, "Running %s", svnupdatecmd) + bb.fetch2.check_network_access(d, svnupdatecmd, ud.url) + runfetchcmd(svnupdatecmd, d) + else: + svnfetchcmd = self._buildsvncommand(ud, d, "fetch") + logger.info("Fetch " + ud.url) + # check out sources there + bb.utils.mkdirhier(ud.pkgdir) + os.chdir(ud.pkgdir) + logger.debug(1, "Running %s", svnfetchcmd) + bb.fetch2.check_network_access(d, svnfetchcmd, ud.url) + runfetchcmd(svnfetchcmd, d) + + scmdata = ud.parm.get("scmdata", "") + if scmdata == "keep": + tar_flags = "" + else: + tar_flags = "--exclude '.svn'" + + os.chdir(ud.pkgdir) + # tar them up to a defined filename + runfetchcmd("tar %s -czf %s %s" % (tar_flags, ud.localpath, ud.module), d, cleanup = [ud.localpath]) + + def clean(self, ud, d): + """ Clean SVN specific files and dirs """ + + bb.utils.remove(ud.localpath) + bb.utils.remove(ud.moddir, True) + + + def supports_srcrev(self): + return True + + def _revision_key(self, ud, d, name): + """ + Return a unique key for the url + """ + return "svn:" + ud.moddir + + def _latest_revision(self, ud, d, name): + """ + Return the latest upstream revision number + """ + bb.fetch2.check_network_access(d, self._buildsvncommand(ud, d, "log1")) + + output = runfetchcmd("LANG=C LC_ALL=C " + self._buildsvncommand(ud, d, "log1"), d, True) + + # skip the first line, as per output of svn log + # then we expect the revision on the 2nd line + revision = re.search('^r([0-9]*)', output.splitlines()[1]).group(1) + + return revision + + def sortable_revision(self, ud, d, name): + """ + 
Return a sortable revision number which in our case is the revision number + """ + + return False, self._build_revision(ud, d) + + def _build_revision(self, ud, d): + return ud.revision diff --git a/bitbake/lib/bb/fetch2/wget.py b/bitbake/lib/bb/fetch2/wget.py new file mode 100644 index 0000000000..0456490368 --- /dev/null +++ b/bitbake/lib/bb/fetch2/wget.py @@ -0,0 +1,106 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +""" +BitBake 'Fetch' implementations + +Classes for obtaining upstream sources for the +BitBake build tools. + +""" + +# Copyright (C) 2003, 2004 Chris Larson +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Based on functions from the base bb module, Copyright 2003 Holger Schurig + +import os +import logging +import bb +import urllib +from bb import data +from bb.fetch2 import FetchMethod +from bb.fetch2 import FetchError +from bb.fetch2 import logger +from bb.fetch2 import runfetchcmd + +class Wget(FetchMethod): + """Class to fetch urls via 'wget'""" + def supports(self, ud, d): + """ + Check to see if a given url can be fetched with wget. 
+ """ + return ud.type in ['http', 'https', 'ftp'] + + def recommends_checksum(self, urldata): + return True + + def urldata_init(self, ud, d): + if 'protocol' in ud.parm: + if ud.parm['protocol'] == 'git': + raise bb.fetch2.ParameterError("Invalid protocol - if you wish to fetch from a git repository using http, you need to instead use the git:// prefix with protocol=http", ud.url) + + if 'downloadfilename' in ud.parm: + ud.basename = ud.parm['downloadfilename'] + else: + ud.basename = os.path.basename(ud.path) + + ud.localfile = data.expand(urllib.unquote(ud.basename), d) + + self.basecmd = d.getVar("FETCHCMD_wget", True) or "/usr/bin/env wget -t 2 -T 30 -nv --passive-ftp --no-check-certificate" + + def _runwget(self, ud, d, command, quiet): + + logger.debug(2, "Fetching %s using command '%s'" % (ud.url, command)) + bb.fetch2.check_network_access(d, command) + runfetchcmd(command, d, quiet) + + def download(self, ud, d): + """Fetch urls""" + + fetchcmd = self.basecmd + + if 'downloadfilename' in ud.parm: + dldir = d.getVar("DL_DIR", True) + bb.utils.mkdirhier(os.path.dirname(dldir + os.sep + ud.localfile)) + fetchcmd += " -O " + dldir + os.sep + ud.localfile + + uri = ud.url.split(";")[0] + if os.path.exists(ud.localpath): + # file exists, but we didnt complete it.. trying again.. + fetchcmd += d.expand(" -c -P ${DL_DIR} '%s'" % uri) + else: + fetchcmd += d.expand(" -P ${DL_DIR} '%s'" % uri) + + self._runwget(ud, d, fetchcmd, False) + + # Sanity check since wget can pretend it succeed when it didn't + # Also, this used to happen if sourceforge sent us to the mirror page + if not os.path.exists(ud.localpath): + raise FetchError("The fetch command returned success for url %s but %s doesn't exist?!" % (uri, ud.localpath), uri) + + if os.path.getsize(ud.localpath) == 0: + os.remove(ud.localpath) + raise FetchError("The fetch of %s resulted in a zero size file?! Deleting and failing since this isn't right." 
% (uri), uri) + + return True + + def checkstatus(self, ud, d): + + uri = ud.url.split(";")[0] + fetchcmd = self.basecmd + " --spider '%s'" % uri + + self._runwget(ud, d, fetchcmd, True) + + return True diff --git a/bitbake/lib/bb/methodpool.py b/bitbake/lib/bb/methodpool.py new file mode 100644 index 0000000000..bf2e9f5542 --- /dev/null +++ b/bitbake/lib/bb/methodpool.py @@ -0,0 +1,29 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +# +# +# Copyright (C) 2006 Holger Hans Peter Freyther +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +from bb.utils import better_compile, better_exec + +def insert_method(modulename, code, fn): + """ + Add code of a module should be added. 
The methods + will be simply added, no checking will be done + """ + comp = better_compile(code, modulename, fn ) + better_exec(comp, None, code, fn) + diff --git a/bitbake/lib/bb/monitordisk.py b/bitbake/lib/bb/monitordisk.py new file mode 100644 index 0000000000..fca43eefd0 --- /dev/null +++ b/bitbake/lib/bb/monitordisk.py @@ -0,0 +1,265 @@ +#!/usr/bin/env python +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +# +# Copyright (C) 2012 Robert Yang +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 
+ +import os, logging, re, sys +import bb +logger = logging.getLogger("BitBake.Monitor") + +def printErr(info): + logger.error("%s\n Disk space monitor will NOT be enabled" % info) + +def convertGMK(unit): + + """ Convert the space unit G, M, K, the unit is case-insensitive """ + + unitG = re.match('([1-9][0-9]*)[gG]\s?$', unit) + if unitG: + return int(unitG.group(1)) * (1024 ** 3) + unitM = re.match('([1-9][0-9]*)[mM]\s?$', unit) + if unitM: + return int(unitM.group(1)) * (1024 ** 2) + unitK = re.match('([1-9][0-9]*)[kK]\s?$', unit) + if unitK: + return int(unitK.group(1)) * 1024 + unitN = re.match('([1-9][0-9]*)\s?$', unit) + if unitN: + return int(unitN.group(1)) + else: + return None + +def getMountedDev(path): + + """ Get the device mounted at the path, uses /proc/mounts """ + + # Get the mount point of the filesystem containing path + # st_dev is the ID of device containing file + parentDev = os.stat(path).st_dev + currentDev = parentDev + # When the current directory's device is different from the + # parrent's, then the current directory is a mount point + while parentDev == currentDev: + mountPoint = path + # Use dirname to get the parrent's directory + path = os.path.dirname(path) + # Reach the "/" + if path == mountPoint: + break + parentDev= os.stat(path).st_dev + + try: + with open("/proc/mounts", "r") as ifp: + for line in ifp: + procLines = line.rstrip('\n').split() + if procLines[1] == mountPoint: + return procLines[0] + except EnvironmentError: + pass + return None + +def getDiskData(BBDirs, configuration): + + """Prepare disk data for disk space monitor""" + + # Save the device IDs, need the ID to be unique (the dictionary's key is + # unique), so that when more than one directories are located in the same + # device, we just monitor it once + devDict = {} + for pathSpaceInode in BBDirs.split(): + # The input format is: "dir,space,inode", dir is a must, space + # and inode are optional + pathSpaceInodeRe = 
re.match('([^,]*),([^,]*),([^,]*),?(.*)', pathSpaceInode) + if not pathSpaceInodeRe: + printErr("Invalid value in BB_DISKMON_DIRS: %s" % pathSpaceInode) + return None + + action = pathSpaceInodeRe.group(1) + if action not in ("ABORT", "STOPTASKS", "WARN"): + printErr("Unknown disk space monitor action: %s" % action) + return None + + path = os.path.realpath(pathSpaceInodeRe.group(2)) + if not path: + printErr("Invalid path value in BB_DISKMON_DIRS: %s" % pathSpaceInode) + return None + + # The disk space or inode is optional, but it should have a correct + # value once it is specified + minSpace = pathSpaceInodeRe.group(3) + if minSpace: + minSpace = convertGMK(minSpace) + if not minSpace: + printErr("Invalid disk space value in BB_DISKMON_DIRS: %s" % pathSpaceInodeRe.group(3)) + return None + else: + # None means that it is not specified + minSpace = None + + minInode = pathSpaceInodeRe.group(4) + if minInode: + minInode = convertGMK(minInode) + if not minInode: + printErr("Invalid inode value in BB_DISKMON_DIRS: %s" % pathSpaceInodeRe.group(4)) + return None + else: + # None means that it is not specified + minInode = None + + if minSpace is None and minInode is None: + printErr("No disk space or inode value in found BB_DISKMON_DIRS: %s" % pathSpaceInode) + return None + # mkdir for the directory since it may not exist, for example the + # DL_DIR may not exist at the very beginning + if not os.path.exists(path): + bb.utils.mkdirhier(path) + dev = getMountedDev(path) + # Use path/action as the key + devDict[os.path.join(path, action)] = [dev, minSpace, minInode] + + return devDict + +def getInterval(configuration): + + """ Get the disk space interval """ + + # The default value is 50M and 5K. 
+ spaceDefault = 50 * 1024 * 1024 + inodeDefault = 5 * 1024 + + interval = configuration.getVar("BB_DISKMON_WARNINTERVAL", True) + if not interval: + return spaceDefault, inodeDefault + else: + # The disk space or inode interval is optional, but it should + # have a correct value once it is specified + intervalRe = re.match('([^,]*),?\s*(.*)', interval) + if intervalRe: + intervalSpace = intervalRe.group(1) + if intervalSpace: + intervalSpace = convertGMK(intervalSpace) + if not intervalSpace: + printErr("Invalid disk space interval value in BB_DISKMON_WARNINTERVAL: %s" % intervalRe.group(1)) + return None, None + else: + intervalSpace = spaceDefault + intervalInode = intervalRe.group(2) + if intervalInode: + intervalInode = convertGMK(intervalInode) + if not intervalInode: + printErr("Invalid disk inode interval value in BB_DISKMON_WARNINTERVAL: %s" % intervalRe.group(2)) + return None, None + else: + intervalInode = inodeDefault + return intervalSpace, intervalInode + else: + printErr("Invalid interval value in BB_DISKMON_WARNINTERVAL: %s" % interval) + return None, None + +class diskMonitor: + + """Prepare the disk space monitor data""" + + def __init__(self, configuration): + + self.enableMonitor = False + self.configuration = configuration + + BBDirs = configuration.getVar("BB_DISKMON_DIRS", True) or None + if BBDirs: + self.devDict = getDiskData(BBDirs, configuration) + if self.devDict: + self.spaceInterval, self.inodeInterval = getInterval(configuration) + if self.spaceInterval and self.inodeInterval: + self.enableMonitor = True + # These are for saving the previous disk free space and inode, we + # use them to avoid print too many warning messages + self.preFreeS = {} + self.preFreeI = {} + # This is for STOPTASKS and ABORT, to avoid print the message repeatly + # during waiting the tasks to finish + self.checked = {} + for k in self.devDict: + self.preFreeS[k] = 0 + self.preFreeI[k] = 0 + self.checked[k] = False + if self.spaceInterval is None and 
self.inodeInterval is None: + self.enableMonitor = False + + def check(self, rq): + + """ Take action for the monitor """ + + if self.enableMonitor: + for k in self.devDict: + path = os.path.dirname(k) + action = os.path.basename(k) + dev = self.devDict[k][0] + minSpace = self.devDict[k][1] + minInode = self.devDict[k][2] + + st = os.statvfs(path) + + # The free space, float point number + freeSpace = st.f_bavail * st.f_frsize + + if minSpace and freeSpace < minSpace: + # Always show warning, the self.checked would always be False if the action is WARN + if self.preFreeS[k] == 0 or self.preFreeS[k] - freeSpace > self.spaceInterval and not self.checked[k]: + logger.warn("The free space of %s (%s) is running low (%.3fGB left)" % \ + (path, dev, freeSpace / 1024 / 1024 / 1024.0)) + self.preFreeS[k] = freeSpace + + if action == "STOPTASKS" and not self.checked[k]: + logger.error("No new tasks can be executed since the disk space monitor action is \"STOPTASKS\"!") + self.checked[k] = True + rq.finish_runqueue(False) + bb.event.fire(bb.event.DiskFull(dev, 'disk', freeSpace, path), self.configuration) + elif action == "ABORT" and not self.checked[k]: + logger.error("Immediately abort since the disk space monitor action is \"ABORT\"!") + self.checked[k] = True + rq.finish_runqueue(True) + bb.event.fire(bb.event.DiskFull(dev, 'disk', freeSpace, path), self.configuration) + + # The free inodes, float point number + freeInode = st.f_favail + + if minInode and freeInode < minInode: + # Some fs formats' (e.g., btrfs) statvfs.f_files (inodes) is + # zero, this is a feature of the fs, we disable the inode + # checking for such a fs. 
+ if st.f_files == 0: + logger.info("Inode check for %s is unavaliable, will remove it from disk monitor" % path) + self.devDict[k][2] = None + continue + # Always show warning, the self.checked would always be False if the action is WARN + if self.preFreeI[k] == 0 or self.preFreeI[k] - freeInode > self.inodeInterval and not self.checked[k]: + logger.warn("The free inode of %s (%s) is running low (%.3fK left)" % \ + (path, dev, freeInode / 1024.0)) + self.preFreeI[k] = freeInode + + if action == "STOPTASKS" and not self.checked[k]: + logger.error("No new tasks can be executed since the disk space monitor action is \"STOPTASKS\"!") + self.checked[k] = True + rq.finish_runqueue(False) + bb.event.fire(bb.event.DiskFull(dev, 'inode', freeInode, path), self.configuration) + elif action == "ABORT" and not self.checked[k]: + logger.error("Immediately abort since the disk space monitor action is \"ABORT\"!") + self.checked[k] = True + rq.finish_runqueue(True) + bb.event.fire(bb.event.DiskFull(dev, 'inode', freeInode, path), self.configuration) + return diff --git a/bitbake/lib/bb/msg.py b/bitbake/lib/bb/msg.py new file mode 100644 index 0000000000..d79768db24 --- /dev/null +++ b/bitbake/lib/bb/msg.py @@ -0,0 +1,196 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +""" +BitBake 'msg' implementation + +Message handling infrastructure for bitbake + +""" + +# Copyright (C) 2006 Richard Purdie +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.

import sys
import copy
import logging
import collections
from itertools import groupby
import warnings
import bb
import bb.event

class BBLogFormatter(logging.Formatter):
    """Formatter which ensures that our 'plain' messages (logging.INFO + 1) are used as is"""

    # Custom log levels, interleaved with the stdlib ones.
    DEBUG3 = logging.DEBUG - 2
    DEBUG2 = logging.DEBUG - 1
    DEBUG = logging.DEBUG
    VERBOSE = logging.INFO - 1
    NOTE = logging.INFO
    PLAIN = logging.INFO + 1
    ERROR = logging.ERROR
    WARNING = logging.WARNING
    CRITICAL = logging.CRITICAL

    # Display names; PLAIN deliberately maps to '' so plain messages carry
    # no level prefix.
    levelnames = {
        DEBUG3 : 'DEBUG',
        DEBUG2 : 'DEBUG',
        DEBUG : 'DEBUG',
        VERBOSE: 'NOTE',
        NOTE : 'NOTE',
        PLAIN : '',
        WARNING : 'WARNING',
        ERROR : 'ERROR',
        CRITICAL: 'ERROR',
    }

    color_enabled = False
    # ANSI color codes; BASECOLOR (29) acts as "default terminal color".
    BASECOLOR, BLACK, RED, GREEN, YELLOW, BLUE, MAGENTA, CYAN, WHITE = range(29, 38)

    COLORS = {
        DEBUG3 : CYAN,
        DEBUG2 : CYAN,
        DEBUG : CYAN,
        VERBOSE : BASECOLOR,
        NOTE : BASECOLOR,
        PLAIN : BASECOLOR,
        WARNING : YELLOW,
        ERROR : RED,
        CRITICAL: RED,
    }

    BLD = '\033[1;%dm'   # bold + color
    STD = '\033[%dm'     # color only
    RST = '\033[0m'      # reset attributes

    def getLevelName(self, levelno):
        """Return the display name for *levelno*, memoizing unknown levels
        into the class-level ``levelnames`` dict."""
        try:
            return self.levelnames[levelno]
        except KeyError:
            self.levelnames[levelno] = value = 'Level %d' % levelno
            return value

    def format(self, record):
        """Format *record*; PLAIN messages pass through untouched, and a
        bitbake-supplied traceback (``record.bb_exc_info``) is appended."""
        record.levelname = self.getLevelName(record.levelno)
        if record.levelno == self.PLAIN:
            msg = record.getMessage()
        else:
            if self.color_enabled:
                record = self.colorize(record)
            msg = logging.Formatter.format(self, record)

        if hasattr(record, 'bb_exc_info'):
            etype, value, tb = record.bb_exc_info
            formatted = bb.exceptions.format_exception(etype, value, tb, limit=5)
            msg += '\n' + ''.join(formatted)
        return msg

    def colorize(self, record):
        """Return a copy of *record* with level name and message wrapped in
        ANSI color escapes; the original record is not mutated."""
        color = self.COLORS[record.levelno]
        if self.color_enabled and color is not None:
            record = copy.copy(record)
            record.levelname = "".join([self.BLD % color, record.levelname, self.RST])
            record.msg = "".join([self.STD % color, record.msg, self.RST])
        return record

    def enable_color(self):
        self.color_enabled = True

class BBLogFilter(object):
    """Filter honouring a base level plus per-domain debug overrides.

    Constructing the filter also lowers the handler's level to the most
    verbose level any domain requests, so domain records reach filter(),
    and installs the filter on the handler.
    """
    def __init__(self, handler, level, debug_domains):
        self.stdlevel = level
        self.debug_domains = debug_domains
        loglevel = level
        for domain in debug_domains:
            if debug_domains[domain] < loglevel:
                loglevel = debug_domains[domain]
        handler.setLevel(loglevel)
        handler.addFilter(self)

    def filter(self, record):
        if record.levelno >= self.stdlevel:
            return True
        if record.name in self.debug_domains and record.levelno >= self.debug_domains[record.name]:
            return True
        return False

class BBLogFilterStdErr(BBLogFilter):
    """Variant passing only ERROR and above (for a stderr handler)."""
    def filter(self, record):
        if not BBLogFilter.filter(self, record):
            return False
        if record.levelno >= logging.ERROR:
            return True
        return False

class BBLogFilterStdOut(BBLogFilter):
    """Variant passing only records below ERROR (for a stdout handler)."""
    def filter(self, record):
        if not BBLogFilter.filter(self, record):
            return False
        if record.levelno < logging.ERROR:
            return True
        return False

# Message control functions
#

loggerDefaultDebugLevel = 0
loggerDefaultVerbose = False
loggerVerboseLogs = False
loggerDefaultDomains = []

def init_msgconfig(verbose, debug, debug_domains=None):
    """
    Set default verbosity and debug levels config the logger
    """
    # NOTE(review): the original used a mutable default argument
    # (debug_domains=[]); normalize a None default instead.
    bb.msg.loggerDefaultDebugLevel = debug
    bb.msg.loggerDefaultVerbose = verbose
    if verbose:
        bb.msg.loggerVerboseLogs = True
    bb.msg.loggerDefaultDomains = debug_domains or []

def constructLogOptions():
    """Translate the module-level defaults into a (level, debug_domains)
    pair suitable for BBLogFilter."""
    debug = loggerDefaultDebugLevel
    verbose = loggerDefaultVerbose
    domains = loggerDefaultDomains

    if debug:
        level = BBLogFormatter.DEBUG - debug + 1
    elif verbose:
        level = BBLogFormatter.VERBOSE
    else:
        level = BBLogFormatter.NOTE

    # groupby counts consecutive repetitions of each domain name; each
    # repetition deepens that domain's debug level by one.
    debug_domains = {}
    for (domainarg, iterator) in groupby(domains):
        dlevel = len(tuple(iterator))
        debug_domains["BitBake.%s" % domainarg] = logging.DEBUG - dlevel + 1
    return level, debug_domains

def addDefaultlogFilter(handler, cls=BBLogFilter):
    """Attach a log filter of type *cls* to *handler* using the module
    defaults from constructLogOptions()."""
    level, debug_domains = constructLogOptions()

    cls(handler, level, debug_domains)

#
# Message handling functions
#

def fatal(msgdomain, msg):
    """Log *msg* as critical on the (optionally domain-scoped) BitBake
    logger, then terminate the process with exit status 1."""
    if msgdomain:
        logger = logging.getLogger("BitBake.%s" % msgdomain)
    else:
        logger = logging.getLogger("BitBake")
    logger.critical(msg)
    sys.exit(1)

# NOTE(review): in the original collapsed patch this hunk was followed by the
# file header of bitbake/lib/bb/namedtuple_with_abc.py (ActiveState recipe
# 577629, MIT licensed, (c) 2011 Jan Kaliszewski).
+""" + +import collections +from abc import ABCMeta, abstractproperty +from functools import wraps +from sys import version_info + +__all__ = ('namedtuple',) +_namedtuple = collections.namedtuple + + +class _NamedTupleABCMeta(ABCMeta): + '''The metaclass for the abstract base class + mix-in for named tuples.''' + def __new__(mcls, name, bases, namespace): + fields = namespace.get('_fields') + for base in bases: + if fields is not None: + break + fields = getattr(base, '_fields', None) + if not isinstance(fields, abstractproperty): + basetuple = _namedtuple(name, fields) + bases = (basetuple,) + bases + namespace.pop('_fields', None) + namespace.setdefault('__doc__', basetuple.__doc__) + namespace.setdefault('__slots__', ()) + return ABCMeta.__new__(mcls, name, bases, namespace) + + +exec( + # Python 2.x metaclass declaration syntax + """class _NamedTupleABC(object): + '''The abstract base class + mix-in for named tuples.''' + __metaclass__ = _NamedTupleABCMeta + _fields = abstractproperty()""" if version_info[0] < 3 else + # Python 3.x metaclass declaration syntax + """class _NamedTupleABC(metaclass=_NamedTupleABCMeta): + '''The abstract base class + mix-in for named tuples.''' + _fields = abstractproperty()""" +) + + +_namedtuple.abc = _NamedTupleABC +#_NamedTupleABC.register(type(version_info)) # (and similar, in the future...) 
+ +@wraps(_namedtuple) +def namedtuple(*args, **kwargs): + '''Named tuple factory with namedtuple.abc subclass registration.''' + cls = _namedtuple(*args, **kwargs) + _NamedTupleABC.register(cls) + return cls + +collections.namedtuple = namedtuple + + + + +if __name__ == '__main__': + + '''Examples and explanations''' + + # Simple usage + + class MyRecord(namedtuple.abc): + _fields = 'x y z' # such form will be transformed into ('x', 'y', 'z') + def _my_custom_method(self): + return list(self._asdict().items()) + # (the '_fields' attribute belongs to the named tuple public API anyway) + + rec = MyRecord(1, 2, 3) + print(rec) + print(rec._my_custom_method()) + print(rec._replace(y=222)) + print(rec._replace(y=222)._my_custom_method()) + + # Custom abstract classes... + + class MyAbstractRecord(namedtuple.abc): + def _my_custom_method(self): + return list(self._asdict().items()) + + try: + MyAbstractRecord() # (abstract classes cannot be instantiated) + except TypeError as exc: + print(exc) + + class AnotherAbstractRecord(MyAbstractRecord): + def __str__(self): + return '<<<{0}>>>'.format(super(AnotherAbstractRecord, + self).__str__()) + + # ...and their non-abstract subclasses + + class MyRecord2(MyAbstractRecord): + _fields = 'a, b' + + class MyRecord3(AnotherAbstractRecord): + _fields = 'p', 'q', 'r' + + rec2 = MyRecord2('foo', 'bar') + print(rec2) + print(rec2._my_custom_method()) + print(rec2._replace(b=222)) + print(rec2._replace(b=222)._my_custom_method()) + + rec3 = MyRecord3('foo', 'bar', 'baz') + print(rec3) + print(rec3._my_custom_method()) + print(rec3._replace(q=222)) + print(rec3._replace(q=222)._my_custom_method()) + + # You can also subclass non-abstract ones... 
+ + class MyRecord33(MyRecord3): + def __str__(self): + return '< {0!r}, ..., {0!r} >'.format(self.p, self.r) + + rec33 = MyRecord33('foo', 'bar', 'baz') + print(rec33) + print(rec33._my_custom_method()) + print(rec33._replace(q=222)) + print(rec33._replace(q=222)._my_custom_method()) + + # ...and even override the magic '_fields' attribute again + + class MyRecord345(MyRecord3): + _fields = 'e f g h i j k' + + rec345 = MyRecord345(1, 2, 3, 4, 3, 2, 1) + print(rec345) + print(rec345._my_custom_method()) + print(rec345._replace(f=222)) + print(rec345._replace(f=222)._my_custom_method()) + + # Mixing-in some other classes is also possible: + + class MyMixIn(object): + def method(self): + return "MyMixIn.method() called" + def _my_custom_method(self): + return "MyMixIn._my_custom_method() called" + def count(self, item): + return "MyMixIn.count({0}) called".format(item) + def _asdict(self): # (cannot override a namedtuple method, see below) + return "MyMixIn._asdict() called" + + class MyRecord4(MyRecord33, MyMixIn): # mix-in on the right + _fields = 'j k l x' + + class MyRecord5(MyMixIn, MyRecord33): # mix-in on the left + _fields = 'j k l x y' + + rec4 = MyRecord4(1, 2, 3, 2) + print(rec4) + print(rec4.method()) + print(rec4._my_custom_method()) # MyRecord33's + print(rec4.count(2)) # tuple's + print(rec4._replace(k=222)) + print(rec4._replace(k=222).method()) + print(rec4._replace(k=222)._my_custom_method()) # MyRecord33's + print(rec4._replace(k=222).count(8)) # tuple's + + rec5 = MyRecord5(1, 2, 3, 2, 1) + print(rec5) + print(rec5.method()) + print(rec5._my_custom_method()) # MyMixIn's + print(rec5.count(2)) # MyMixIn's + print(rec5._replace(k=222)) + print(rec5._replace(k=222).method()) + print(rec5._replace(k=222)._my_custom_method()) # MyMixIn's + print(rec5._replace(k=222).count(2)) # MyMixIn's + + # None that behavior: the standard namedtuple methods cannot be + # overriden by a foreign mix-in -- even if the mix-in is declared + # as the leftmost base class 
(but, obviously, you can override them + # in the defined class or its subclasses): + + print(rec4._asdict()) # (returns a dict, not "MyMixIn._asdict() called") + print(rec5._asdict()) # (returns a dict, not "MyMixIn._asdict() called") + + class MyRecord6(MyRecord33): + _fields = 'j k l x y z' + def _asdict(self): + return "MyRecord6._asdict() called" + rec6 = MyRecord6(1, 2, 3, 1, 2, 3) + print(rec6._asdict()) # (this returns "MyRecord6._asdict() called") + + # All that record classes are real subclasses of namedtuple.abc: + + assert issubclass(MyRecord, namedtuple.abc) + assert issubclass(MyAbstractRecord, namedtuple.abc) + assert issubclass(AnotherAbstractRecord, namedtuple.abc) + assert issubclass(MyRecord2, namedtuple.abc) + assert issubclass(MyRecord3, namedtuple.abc) + assert issubclass(MyRecord33, namedtuple.abc) + assert issubclass(MyRecord345, namedtuple.abc) + assert issubclass(MyRecord4, namedtuple.abc) + assert issubclass(MyRecord5, namedtuple.abc) + assert issubclass(MyRecord6, namedtuple.abc) + + # ...but abstract ones are not subclasses of tuple + # (and this is what you probably want): + + assert not issubclass(MyAbstractRecord, tuple) + assert not issubclass(AnotherAbstractRecord, tuple) + + assert issubclass(MyRecord, tuple) + assert issubclass(MyRecord2, tuple) + assert issubclass(MyRecord3, tuple) + assert issubclass(MyRecord33, tuple) + assert issubclass(MyRecord345, tuple) + assert issubclass(MyRecord4, tuple) + assert issubclass(MyRecord5, tuple) + assert issubclass(MyRecord6, tuple) + + # Named tuple classes created with namedtuple() factory function + # (in the "traditional" way) are registered as "virtual" subclasses + # of namedtuple.abc: + + MyTuple = namedtuple('MyTuple', 'a b c') + mt = MyTuple(1, 2, 3) + assert issubclass(MyTuple, namedtuple.abc) + assert isinstance(mt, namedtuple.abc) diff --git a/bitbake/lib/bb/parse/__init__.py b/bitbake/lib/bb/parse/__init__.py new file mode 100644 index 0000000000..e4a44dda11 --- /dev/null +++ 
"""
BitBake Parsers

File parsers for the BitBake build tools.

"""


# Copyright (C) 2003, 2004 Chris Larson
# Copyright (C) 2003, 2004 Phil Blundell
#
# This program is free software; you can redistribute it and/or modify
# it under the terms of the GNU General Public License version 2 as
# published by the Free Software Foundation.
#
# This program is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License along
# with this program; if not, write to the Free Software Foundation, Inc.,
# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
#
# Based on functions from the base bb module, Copyright 2003 Holger Schurig

# Registered parser handlers; each entry is a dict providing 'supports',
# 'handle' and 'init' callables.
handlers = []

import os
import stat
import logging
import bb
import bb.utils
import bb.siggen

logger = logging.getLogger("BitBake.Parsing")

class ParseError(Exception):
    """Exception raised when parsing fails"""
    def __init__(self, msg, filename, lineno=0):
        self.msg = msg
        self.filename = filename
        self.lineno = lineno
        Exception.__init__(self, msg, filename, lineno)

    def __str__(self):
        if self.lineno:
            return "ParseError at %s:%d: %s" % (self.filename, self.lineno, self.msg)
        else:
            return "ParseError in %s: %s" % (self.filename, self.msg)

class SkipPackage(Exception):
    """Exception raised to skip this package"""

# Cache of file mtimes, keyed by path.
__mtime_cache = {}
def cached_mtime(f):
    """Return the cached mtime of *f*; stats (and raises OSError) on a miss."""
    if f not in __mtime_cache:
        __mtime_cache[f] = os.stat(f)[stat.ST_MTIME]
    return __mtime_cache[f]

def cached_mtime_noerror(f):
    """Like cached_mtime() but returns 0 instead of raising on stat failure."""
    if f not in __mtime_cache:
        try:
            __mtime_cache[f] = os.stat(f)[stat.ST_MTIME]
        except OSError:
            return 0
    return __mtime_cache[f]

def update_mtime(f):
    """Unconditionally refresh and return the cached mtime of *f*."""
    __mtime_cache[f] = os.stat(f)[stat.ST_MTIME]
    return __mtime_cache[f]

def mark_dependency(d, f):
    """Record file *f* (with its current mtime) in the datastore's
    __depends list, absolutizing a leading './'."""
    if f.startswith('./'):
        f = "%s/%s" % (os.getcwd(), f[2:])
    deps = (d.getVar('__depends') or [])
    s = (f, cached_mtime_noerror(f))
    if s not in deps:
        deps.append(s)
        d.setVar('__depends', deps)

def check_dependency(d, f):
    """Return True if *f* (at its current mtime) is already in __depends."""
    s = (f, cached_mtime_noerror(f))
    deps = (d.getVar('__depends') or [])
    return s in deps

def supports(fn, data):
    """Returns true if we have a handler for this file, false otherwise"""
    for h in handlers:
        if h['supports'](fn, data):
            return 1
    return 0

def handle(fn, data, include = 0):
    """Call the handler that is appropriate for this file"""
    for h in handlers:
        if h['supports'](fn, data):
            with data.inchistory.include(fn):
                return h['handle'](fn, data, include)
    raise ParseError("not a BitBake file", fn)

def init(fn, data):
    """Run the init hook of the first handler that supports *fn*."""
    for h in handlers:
        if h['supports'](fn):
            return h['init'](data)

def init_parser(d):
    bb.parse.siggen = bb.siggen.init(d)

def resolve_file(fn, d):
    """Resolve *fn* against BBPATH (recording every attempted path as a
    dependency) and return an absolute, existing filename or raise IOError."""
    if not os.path.isabs(fn):
        bbpath = d.getVar("BBPATH", True)
        newfn, attempts = bb.utils.which(bbpath, fn, history=True)
        for af in attempts:
            mark_dependency(d, af)
        if not newfn:
            raise IOError("file %s not found in %s" % (fn, bbpath))
        fn = newfn

    mark_dependency(d, fn)
    if not os.path.isfile(fn):
        raise IOError("file %s not found" % fn)

    logger.debug(2, "LOAD %s", fn)
    return fn

# Used by OpenEmbedded metadata
__pkgsplit_cache__={}
def vars_from_file(mypkg, d):
    """Split a recipe basename on '_' into up to three parts (presumably
    name/version/revision -- TODO confirm against callers), padding missing
    entries with None.  Results are memoized in __pkgsplit_cache__."""
    if not mypkg or not mypkg.endswith((".bb", ".bbappend")):
        return (None, None, None)
    if mypkg in __pkgsplit_cache__:
        return __pkgsplit_cache__[mypkg]

    myfile = os.path.splitext(os.path.basename(mypkg))
    parts = myfile[0].split('_')
    __pkgsplit_cache__[mypkg] = parts
    if len(parts) > 3:
        raise ParseError("Unable to generate default variables from filename (too many underscores)", mypkg)
    # Pad in place to exactly three entries; extending the same list object
    # also updates the cached value (replaces the original while-loop).
    parts.extend([None] * (3 - len(parts)))
    return parts

def get_file_depends(d):
    '''Return the dependent files'''
    dep_files = []
    depends = d.getVar('__base_depends', True) or []
    depends = depends + (d.getVar('__depends', True) or [])
    for (fn, _) in depends:
        dep_files.append(os.path.abspath(fn))
    return " ".join(dep_files)

from bb.parse.parse_py import __version__, ConfHandler, BBHandler

# NOTE(review): in the original collapsed patch this hunk was followed by the
# GPLv2 header of bitbake/lib/bb/parse/ast.py.
+ +from __future__ import absolute_import +from future_builtins import filter +import re +import string +import logging +import bb +import itertools +from bb import methodpool +from bb.parse import logger + +_bbversions_re = re.compile(r"\[(?P[0-9]+)-(?P[0-9]+)\]") + +class StatementGroup(list): + def eval(self, data): + for statement in self: + statement.eval(data) + +class AstNode(object): + def __init__(self, filename, lineno): + self.filename = filename + self.lineno = lineno + +class IncludeNode(AstNode): + def __init__(self, filename, lineno, what_file, force): + AstNode.__init__(self, filename, lineno) + self.what_file = what_file + self.force = force + + def eval(self, data): + """ + Include the file and evaluate the statements + """ + s = data.expand(self.what_file) + logger.debug(2, "CONF %s:%s: including %s", self.filename, self.lineno, s) + + # TODO: Cache those includes... maybe not here though + if self.force: + bb.parse.ConfHandler.include(self.filename, s, self.lineno, data, "include required") + else: + bb.parse.ConfHandler.include(self.filename, s, self.lineno, data, False) + +class ExportNode(AstNode): + def __init__(self, filename, lineno, var): + AstNode.__init__(self, filename, lineno) + self.var = var + + def eval(self, data): + data.setVarFlag(self.var, "export", 1, op = 'exported') + +class DataNode(AstNode): + """ + Various data related updates. For the sake of sanity + we have one class doing all this. This means that all + this need to be re-evaluated... we might be able to do + that faster with multiple classes. 
+ """ + def __init__(self, filename, lineno, groupd): + AstNode.__init__(self, filename, lineno) + self.groupd = groupd + + def getFunc(self, key, data): + if 'flag' in self.groupd and self.groupd['flag'] != None: + return data.getVarFlag(key, self.groupd['flag'], noweakdefault=True) + else: + return data.getVar(key, noweakdefault=True) + + def eval(self, data): + groupd = self.groupd + key = groupd["var"] + loginfo = { + 'variable': key, + 'file': self.filename, + 'line': self.lineno, + } + if "exp" in groupd and groupd["exp"] != None: + data.setVarFlag(key, "export", 1, op = 'exported', **loginfo) + + op = "set" + if "ques" in groupd and groupd["ques"] != None: + val = self.getFunc(key, data) + op = "set?" + if val == None: + val = groupd["value"] + elif "colon" in groupd and groupd["colon"] != None: + e = data.createCopy() + bb.data.update_data(e) + op = "immediate" + val = e.expand(groupd["value"], key + "[:=]") + elif "append" in groupd and groupd["append"] != None: + op = "append" + val = "%s %s" % ((self.getFunc(key, data) or ""), groupd["value"]) + elif "prepend" in groupd and groupd["prepend"] != None: + op = "prepend" + val = "%s %s" % (groupd["value"], (self.getFunc(key, data) or "")) + elif "postdot" in groupd and groupd["postdot"] != None: + op = "postdot" + val = "%s%s" % ((self.getFunc(key, data) or ""), groupd["value"]) + elif "predot" in groupd and groupd["predot"] != None: + op = "predot" + val = "%s%s" % (groupd["value"], (self.getFunc(key, data) or "")) + else: + val = groupd["value"] + + flag = None + if 'flag' in groupd and groupd['flag'] != None: + flag = groupd['flag'] + elif groupd["lazyques"]: + flag = "defaultval" + + loginfo['op'] = op + loginfo['detail'] = groupd["value"] + + if flag: + data.setVarFlag(key, flag, val, **loginfo) + else: + data.setVar(key, val, **loginfo) + +class MethodNode(AstNode): + tr_tbl = string.maketrans('/.+-@%', '______') + + def __init__(self, filename, lineno, func_name, body): + AstNode.__init__(self, 
filename, lineno) + self.func_name = func_name + self.body = body + + def eval(self, data): + text = '\n'.join(self.body) + if self.func_name == "__anonymous": + funcname = ("__anon_%s_%s" % (self.lineno, self.filename.translate(MethodNode.tr_tbl))) + text = "def %s(d):\n" % (funcname) + text + bb.methodpool.insert_method(funcname, text, self.filename) + anonfuncs = data.getVar('__BBANONFUNCS') or [] + anonfuncs.append(funcname) + data.setVar('__BBANONFUNCS', anonfuncs) + data.setVar(funcname, text) + else: + data.setVarFlag(self.func_name, "func", 1) + data.setVar(self.func_name, text) + +class PythonMethodNode(AstNode): + def __init__(self, filename, lineno, function, modulename, body): + AstNode.__init__(self, filename, lineno) + self.function = function + self.modulename = modulename + self.body = body + + def eval(self, data): + # Note we will add root to parsedmethods after having parse + # 'this' file. This means we will not parse methods from + # bb classes twice + text = '\n'.join(self.body) + bb.methodpool.insert_method(self.modulename, text, self.filename) + data.setVarFlag(self.function, "func", 1) + data.setVarFlag(self.function, "python", 1) + data.setVar(self.function, text) + +class MethodFlagsNode(AstNode): + def __init__(self, filename, lineno, key, m): + AstNode.__init__(self, filename, lineno) + self.key = key + self.m = m + + def eval(self, data): + if data.getVar(self.key): + # clean up old version of this piece of metadata, as its + # flags could cause problems + data.setVarFlag(self.key, 'python', None) + data.setVarFlag(self.key, 'fakeroot', None) + if self.m.group("py") is not None: + data.setVarFlag(self.key, "python", "1") + else: + data.delVarFlag(self.key, "python") + if self.m.group("fr") is not None: + data.setVarFlag(self.key, "fakeroot", "1") + else: + data.delVarFlag(self.key, "fakeroot") + +class ExportFuncsNode(AstNode): + def __init__(self, filename, lineno, fns, classname): + AstNode.__init__(self, filename, lineno) + self.n = 
fns.split() + self.classname = classname + + def eval(self, data): + + for func in self.n: + calledfunc = self.classname + "_" + func + + if data.getVar(func) and not data.getVarFlag(func, 'export_func'): + continue + + if data.getVar(func): + data.setVarFlag(func, 'python', None) + data.setVarFlag(func, 'func', None) + + for flag in [ "func", "python" ]: + if data.getVarFlag(calledfunc, flag): + data.setVarFlag(func, flag, data.getVarFlag(calledfunc, flag)) + for flag in [ "dirs" ]: + if data.getVarFlag(func, flag): + data.setVarFlag(calledfunc, flag, data.getVarFlag(func, flag)) + + if data.getVarFlag(calledfunc, "python"): + data.setVar(func, " bb.build.exec_func('" + calledfunc + "', d)\n") + else: + data.setVar(func, " " + calledfunc + "\n") + data.setVarFlag(func, 'export_func', '1') + +class AddTaskNode(AstNode): + def __init__(self, filename, lineno, func, before, after): + AstNode.__init__(self, filename, lineno) + self.func = func + self.before = before + self.after = after + + def eval(self, data): + bb.build.addtask(self.func, self.before, self.after, data) + +class DelTaskNode(AstNode): + def __init__(self, filename, lineno, func): + AstNode.__init__(self, filename, lineno) + self.func = func + + def eval(self, data): + bb.build.deltask(self.func, data) + +class BBHandlerNode(AstNode): + def __init__(self, filename, lineno, fns): + AstNode.__init__(self, filename, lineno) + self.hs = fns.split() + + def eval(self, data): + bbhands = data.getVar('__BBHANDLERS') or [] + for h in self.hs: + bbhands.append(h) + data.setVarFlag(h, "handler", 1) + data.setVar('__BBHANDLERS', bbhands) + +class InheritNode(AstNode): + def __init__(self, filename, lineno, classes): + AstNode.__init__(self, filename, lineno) + self.classes = classes + + def eval(self, data): + bb.parse.BBHandler.inherit(self.classes, self.filename, self.lineno, data) + +def handleInclude(statements, filename, lineno, m, force): + statements.append(IncludeNode(filename, lineno, m.group(1), 
force)) + +def handleExport(statements, filename, lineno, m): + statements.append(ExportNode(filename, lineno, m.group(1))) + +def handleData(statements, filename, lineno, groupd): + statements.append(DataNode(filename, lineno, groupd)) + +def handleMethod(statements, filename, lineno, func_name, body): + statements.append(MethodNode(filename, lineno, func_name, body)) + +def handlePythonMethod(statements, filename, lineno, funcname, modulename, body): + statements.append(PythonMethodNode(filename, lineno, funcname, modulename, body)) + +def handleMethodFlags(statements, filename, lineno, key, m): + statements.append(MethodFlagsNode(filename, lineno, key, m)) + +def handleExportFuncs(statements, filename, lineno, m, classname): + statements.append(ExportFuncsNode(filename, lineno, m.group(1), classname)) + +def handleAddTask(statements, filename, lineno, m): + func = m.group("func") + before = m.group("before") + after = m.group("after") + if func is None: + return + + statements.append(AddTaskNode(filename, lineno, func, before, after)) + +def handleDelTask(statements, filename, lineno, m): + func = m.group("func") + if func is None: + return + + statements.append(DelTaskNode(filename, lineno, func)) + +def handleBBHandlers(statements, filename, lineno, m): + statements.append(BBHandlerNode(filename, lineno, m.group(1))) + +def handleInherit(statements, filename, lineno, m): + classes = m.group(1) + statements.append(InheritNode(filename, lineno, classes)) + +def finalize(fn, d, variant = None): + all_handlers = {} + for var in d.getVar('__BBHANDLERS') or []: + # try to add the handler + bb.event.register(var, d.getVar(var), (d.getVarFlag(var, "eventmask", True) or "").split()) + + bb.event.fire(bb.event.RecipePreFinalise(fn), d) + + bb.data.expandKeys(d) + bb.data.update_data(d) + code = [] + for funcname in d.getVar("__BBANONFUNCS") or []: + code.append("%s(d)" % funcname) + bb.utils.better_exec("\n".join(code), {"d": d}) + bb.data.update_data(d) + + tasklist = 
d.getVar('__BBTASKS') or [] + deltasklist = d.getVar('__BBDELTASKS') or [] + bb.build.add_tasks(tasklist, deltasklist, d) + + bb.parse.siggen.finalise(fn, d, variant) + + d.setVar('BBINCLUDED', bb.parse.get_file_depends(d)) + + bb.event.fire(bb.event.RecipeParsed(fn), d) + +def _create_variants(datastores, names, function): + def create_variant(name, orig_d, arg = None): + new_d = bb.data.createCopy(orig_d) + function(arg or name, new_d) + datastores[name] = new_d + + for variant, variant_d in datastores.items(): + for name in names: + if not variant: + # Based on main recipe + create_variant(name, variant_d) + else: + create_variant("%s-%s" % (variant, name), variant_d, name) + +def _expand_versions(versions): + def expand_one(version, start, end): + for i in xrange(start, end + 1): + ver = _bbversions_re.sub(str(i), version, 1) + yield ver + + versions = iter(versions) + while True: + try: + version = next(versions) + except StopIteration: + break + + range_ver = _bbversions_re.search(version) + if not range_ver: + yield version + else: + newversions = expand_one(version, int(range_ver.group("from")), + int(range_ver.group("to"))) + versions = itertools.chain(newversions, versions) + +def multi_finalize(fn, d): + appends = (d.getVar("__BBAPPEND", True) or "").split() + for append in appends: + logger.debug(2, "Appending .bbappend file %s to %s", append, fn) + bb.parse.BBHandler.handle(append, d, True) + + onlyfinalise = d.getVar("__ONLYFINALISE", False) + + safe_d = d + d = bb.data.createCopy(safe_d) + try: + finalize(fn, d) + except bb.parse.SkipPackage as e: + d.setVar("__SKIPPED", e.args[0]) + datastores = {"": safe_d} + + versions = (d.getVar("BBVERSIONS", True) or "").split() + if versions: + pv = orig_pv = d.getVar("PV", True) + baseversions = {} + + def verfunc(ver, d, pv_d = None): + if pv_d is None: + pv_d = d + + overrides = d.getVar("OVERRIDES", True).split(":") + pv_d.setVar("PV", ver) + overrides.append(ver) + bpv = baseversions.get(ver) or orig_pv + 
pv_d.setVar("BPV", bpv) + overrides.append(bpv) + d.setVar("OVERRIDES", ":".join(overrides)) + + versions = list(_expand_versions(versions)) + for pos, version in enumerate(list(versions)): + try: + pv, bpv = version.split(":", 2) + except ValueError: + pass + else: + versions[pos] = pv + baseversions[pv] = bpv + + if pv in versions and not baseversions.get(pv): + versions.remove(pv) + else: + pv = versions.pop() + + # This is necessary because our existing main datastore + # has already been finalized with the old PV, we need one + # that's been finalized with the new PV. + d = bb.data.createCopy(safe_d) + verfunc(pv, d, safe_d) + try: + finalize(fn, d) + except bb.parse.SkipPackage as e: + d.setVar("__SKIPPED", e.args[0]) + + _create_variants(datastores, versions, verfunc) + + extended = d.getVar("BBCLASSEXTEND", True) or "" + if extended: + # the following is to support bbextends with arguments, for e.g. multilib + # an example is as follows: + # BBCLASSEXTEND = "multilib:lib32" + # it will create foo-lib32, inheriting multilib.bbclass and set + # BBEXTENDCURR to "multilib" and BBEXTENDVARIANT to "lib32" + extendedmap = {} + variantmap = {} + + for ext in extended.split(): + eext = ext.split(':', 2) + if len(eext) > 1: + extendedmap[ext] = eext[0] + variantmap[ext] = eext[1] + else: + extendedmap[ext] = ext + + pn = d.getVar("PN", True) + def extendfunc(name, d): + if name != extendedmap[name]: + d.setVar("BBEXTENDCURR", extendedmap[name]) + d.setVar("BBEXTENDVARIANT", variantmap[name]) + else: + d.setVar("PN", "%s-%s" % (pn, name)) + bb.parse.BBHandler.inherit(extendedmap[name], fn, 0, d) + + safe_d.setVar("BBCLASSEXTEND", extended) + _create_variants(datastores, extendedmap.keys(), extendfunc) + + for variant, variant_d in datastores.iteritems(): + if variant: + try: + if not onlyfinalise or variant in onlyfinalise: + finalize(fn, variant_d, variant) + except bb.parse.SkipPackage as e: + variant_d.setVar("__SKIPPED", e.args[0]) + + if len(datastores) > 1: + 
variants = filter(None, datastores.iterkeys()) + safe_d.setVar("__VARIANTS", " ".join(variants)) + + datastores[""] = d + return datastores diff --git a/bitbake/lib/bb/parse/parse_py/BBHandler.py b/bitbake/lib/bb/parse/parse_py/BBHandler.py new file mode 100644 index 0000000000..408890e48a --- /dev/null +++ b/bitbake/lib/bb/parse/parse_py/BBHandler.py @@ -0,0 +1,267 @@ +#!/usr/bin/env python +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +""" + class for handling .bb files + + Reads a .bb file and obtains its metadata + +""" + + +# Copyright (C) 2003, 2004 Chris Larson +# Copyright (C) 2003, 2004 Phil Blundell +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +from __future__ import absolute_import +import re, bb, os +import logging +import bb.build, bb.utils +from bb import data + +from . import ConfHandler +from .. 
import resolve_file, ast, logger +from .ConfHandler import include, init + +# For compatibility +bb.deprecate_import(__name__, "bb.parse", ["vars_from_file"]) + +__func_start_regexp__ = re.compile( r"(((?Ppython)|(?Pfakeroot))\s*)*(?P[\w\.\-\+\{\}\$]+)?\s*\(\s*\)\s*{$" ) +__inherit_regexp__ = re.compile( r"inherit\s+(.+)" ) +__export_func_regexp__ = re.compile( r"EXPORT_FUNCTIONS\s+(.+)" ) +__addtask_regexp__ = re.compile("addtask\s+(?P\w+)\s*((before\s*(?P((.*(?=after))|(.*))))|(after\s*(?P((.*(?=before))|(.*)))))*") +__deltask_regexp__ = re.compile("deltask\s+(?P\w+)") +__addhandler_regexp__ = re.compile( r"addhandler\s+(.+)" ) +__def_regexp__ = re.compile( r"def\s+(\w+).*:" ) +__python_func_regexp__ = re.compile( r"(\s+.*)|(^$)" ) + + +__infunc__ = "" +__inpython__ = False +__body__ = [] +__classname__ = "" + +cached_statements = {} + +# We need to indicate EOF to the feeder. This code is so messy that +# factoring it out to a close_parse_file method is out of question. +# We will use the IN_PYTHON_EOF as an indicator to just close the method +# +# The two parts using it are tightly integrated anyway +IN_PYTHON_EOF = -9999999999999 + + + +def supports(fn, d): + """Return True if fn has a supported extension""" + return os.path.splitext(fn)[-1] in [".bb", ".bbclass", ".inc"] + +def inherit(files, fn, lineno, d): + __inherit_cache = d.getVar('__inherit_cache') or [] + files = d.expand(files).split() + for file in files: + if not os.path.isabs(file) and not file.endswith(".bbclass"): + file = os.path.join('classes', '%s.bbclass' % file) + + if not os.path.isabs(file): + dname = os.path.dirname(fn) + bbpath = "%s:%s" % (dname, d.getVar("BBPATH", True)) + abs_fn, attempts = bb.utils.which(bbpath, file, history=True) + for af in attempts: + if af != abs_fn: + bb.parse.mark_dependency(d, af) + if abs_fn: + file = abs_fn + + if not file in __inherit_cache: + logger.log(logging.DEBUG -1, "BB %s:%d: inheriting %s", fn, lineno, file) + __inherit_cache.append( file ) + 
d.setVar('__inherit_cache', __inherit_cache) + include(fn, file, lineno, d, "inherit") + __inherit_cache = d.getVar('__inherit_cache') or [] + +def get_statements(filename, absolute_filename, base_name): + global cached_statements + + try: + return cached_statements[absolute_filename] + except KeyError: + file = open(absolute_filename, 'r') + statements = ast.StatementGroup() + + lineno = 0 + while True: + lineno = lineno + 1 + s = file.readline() + if not s: break + s = s.rstrip() + feeder(lineno, s, filename, base_name, statements) + file.close() + if __inpython__: + # add a blank line to close out any python definition + feeder(IN_PYTHON_EOF, "", filename, base_name, statements) + + if filename.endswith(".bbclass") or filename.endswith(".inc"): + cached_statements[absolute_filename] = statements + return statements + +def handle(fn, d, include): + global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __infunc__, __body__, __residue__, __classname__ + __body__ = [] + __infunc__ = "" + __classname__ = "" + __residue__ = [] + + + if include == 0: + logger.debug(2, "BB %s: handle(data)", fn) + else: + logger.debug(2, "BB %s: handle(data, include)", fn) + + base_name = os.path.basename(fn) + (root, ext) = os.path.splitext(base_name) + init(d) + + if ext == ".bbclass": + __classname__ = root + __inherit_cache = d.getVar('__inherit_cache') or [] + if not fn in __inherit_cache: + __inherit_cache.append(fn) + d.setVar('__inherit_cache', __inherit_cache) + + if include != 0: + oldfile = d.getVar('FILE') + else: + oldfile = None + + abs_fn = resolve_file(fn, d) + + if include: + bb.parse.mark_dependency(d, abs_fn) + + # actual loading + statements = get_statements(fn, abs_fn, base_name) + + # DONE WITH PARSING... 
time to evaluate + if ext != ".bbclass": + d.setVar('FILE', abs_fn) + + try: + statements.eval(d) + except bb.parse.SkipPackage: + bb.data.setVar("__SKIPPED", True, d) + if include == 0: + return { "" : d } + + if ext != ".bbclass" and include == 0: + return ast.multi_finalize(fn, d) + + if oldfile: + d.setVar("FILE", oldfile) + + return d + +def feeder(lineno, s, fn, root, statements): + global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __def_regexp__, __python_func_regexp__, __inpython__, __infunc__, __body__, bb, __residue__, __classname__ + if __infunc__: + if s == '}': + __body__.append('') + ast.handleMethod(statements, fn, lineno, __infunc__, __body__) + __infunc__ = "" + __body__ = [] + else: + __body__.append(s) + return + + if __inpython__: + m = __python_func_regexp__.match(s) + if m and lineno != IN_PYTHON_EOF: + __body__.append(s) + return + else: + ast.handlePythonMethod(statements, fn, lineno, __inpython__, + root, __body__) + __body__ = [] + __inpython__ = False + + if lineno == IN_PYTHON_EOF: + return + + if s and s[0] == '#': + if len(__residue__) != 0 and __residue__[0][0] != "#": + bb.fatal("There is a comment on line %s of file %s (%s) which is in the middle of a multiline expression.\nBitbake used to ignore these but no longer does so, please fix your metadata as errors are likely as a result of this change." % (lineno, fn, s)) + + if len(__residue__) != 0 and __residue__[0][0] == "#" and (not s or s[0] != "#"): + bb.fatal("There is a confusing multiline, partially commented expression on line %s of file %s (%s).\nPlease clarify whether this is all a comment or should be parsed." 
% (lineno, fn, s)) + + if s and s[-1] == '\\': + __residue__.append(s[:-1]) + return + + s = "".join(__residue__) + s + __residue__ = [] + + # Skip empty lines + if s == '': + return + + # Skip comments + if s[0] == '#': + return + + m = __func_start_regexp__.match(s) + if m: + __infunc__ = m.group("func") or "__anonymous" + ast.handleMethodFlags(statements, fn, lineno, __infunc__, m) + return + + m = __def_regexp__.match(s) + if m: + __body__.append(s) + __inpython__ = m.group(1) + + return + + m = __export_func_regexp__.match(s) + if m: + ast.handleExportFuncs(statements, fn, lineno, m, __classname__) + return + + m = __addtask_regexp__.match(s) + if m: + ast.handleAddTask(statements, fn, lineno, m) + return + + m = __deltask_regexp__.match(s) + if m: + ast.handleDelTask(statements, fn, lineno, m) + return + + m = __addhandler_regexp__.match(s) + if m: + ast.handleBBHandlers(statements, fn, lineno, m) + return + + m = __inherit_regexp__.match(s) + if m: + ast.handleInherit(statements, fn, lineno, m) + return + + return ConfHandler.feeder(lineno, s, fn, statements) + +# Add us to the handlers list +from .. import handlers +handlers.append({'supports': supports, 'handle': handle, 'init': init}) +del handlers diff --git a/bitbake/lib/bb/parse/parse_py/ConfHandler.py b/bitbake/lib/bb/parse/parse_py/ConfHandler.py new file mode 100644 index 0000000000..978ebe4608 --- /dev/null +++ b/bitbake/lib/bb/parse/parse_py/ConfHandler.py @@ -0,0 +1,189 @@ +#!/usr/bin/env python +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +""" + class for handling configuration data files + + Reads a .conf file and obtains its metadata + +""" + +# Copyright (C) 2003, 2004 Chris Larson +# Copyright (C) 2003, 2004 Phil Blundell +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +import re, os +import logging +import bb.utils +from bb.parse import ParseError, resolve_file, ast, logger + +__config_regexp__ = re.compile( r""" + ^ + (?Pexport\s*)? + (?P[a-zA-Z0-9\-~_+.${}/]+?) + (\[(?P[a-zA-Z0-9\-_+.]+)\])? + + \s* ( + (?P:=) | + (?P\?\?=) | + (?P\?=) | + (?P\+=) | + (?P=\+) | + (?P=\.) | + (?P\.=) | + = + ) \s* + + (?!'[^']*'[^']*'$) + (?!\"[^\"]*\"[^\"]*\"$) + (?P['\"]) + (?P.*) + (?P=apo) + $ + """, re.X) +__include_regexp__ = re.compile( r"include\s+(.+)" ) +__require_regexp__ = re.compile( r"require\s+(.+)" ) +__export_regexp__ = re.compile( r"export\s+([a-zA-Z0-9\-_+.${}/]+)$" ) + +def init(data): + topdir = data.getVar('TOPDIR') + if not topdir: + data.setVar('TOPDIR', os.getcwd()) + + +def supports(fn, d): + return fn[-5:] == ".conf" + +def include(oldfn, fn, lineno, data, error_out): + """ + error_out: A string indicating the verb (e.g. "include", "inherit") to be + used in a ParseError that will be raised if the file to be included could + not be included. Specify False to avoid raising an error in this case. 
+ """ + if oldfn == fn: # prevent infinite recursion + return None + + import bb + fn = data.expand(fn) + oldfn = data.expand(oldfn) + + if not os.path.isabs(fn): + dname = os.path.dirname(oldfn) + bbpath = "%s:%s" % (dname, data.getVar("BBPATH", True)) + abs_fn, attempts = bb.utils.which(bbpath, fn, history=True) + if abs_fn and bb.parse.check_dependency(data, abs_fn): + bb.warn("Duplicate inclusion for %s in %s" % (abs_fn, data.getVar('FILE', True))) + for af in attempts: + bb.parse.mark_dependency(data, af) + if abs_fn: + fn = abs_fn + elif bb.parse.check_dependency(data, fn): + bb.warn("Duplicate inclusion for %s in %s" % (fn, data.getVar('FILE', True))) + + from bb.parse import handle + try: + ret = handle(fn, data, True) + except (IOError, OSError): + if error_out: + raise ParseError("Could not %(error_out)s file %(fn)s" % vars(), oldfn, lineno) + logger.debug(2, "CONF file '%s' not found", fn) + bb.parse.mark_dependency(data, fn) + +# We have an issue where a UI might want to enforce particular settings such as +# an empty DISTRO variable. If configuration files do something like assigning +# a weak default, it turns out to be very difficult to filter out these changes, +# particularly when the weak default might appear half way though parsing a chain +# of configuration files. We therefore let the UIs hook into configuration file +# parsing. This turns out to be a hard problem to solve any other way. 
+confFilters = [] + +def handle(fn, data, include): + init(data) + + if include == 0: + oldfile = None + else: + oldfile = data.getVar('FILE') + + abs_fn = resolve_file(fn, data) + f = open(abs_fn, 'r') + + if include: + bb.parse.mark_dependency(data, abs_fn) + + statements = ast.StatementGroup() + lineno = 0 + while True: + lineno = lineno + 1 + s = f.readline() + if not s: + break + w = s.strip() + # skip empty lines + if not w: + continue + s = s.rstrip() + while s[-1] == '\\': + s2 = f.readline().strip() + lineno = lineno + 1 + if (not s2 or s2 and s2[0] != "#") and s[0] == "#" : + bb.fatal("There is a confusing multiline, partially commented expression on line %s of file %s (%s).\nPlease clarify whether this is all a comment or should be parsed." % (lineno, fn, s)) + s = s[:-1] + s2 + # skip comments + if s[0] == '#': + continue + feeder(lineno, s, abs_fn, statements) + + # DONE WITH PARSING... time to evaluate + data.setVar('FILE', abs_fn) + statements.eval(data) + if oldfile: + data.setVar('FILE', oldfile) + + f.close() + + for f in confFilters: + f(fn, data) + + return data + +def feeder(lineno, s, fn, statements): + m = __config_regexp__.match(s) + if m: + groupd = m.groupdict() + ast.handleData(statements, fn, lineno, groupd) + return + + m = __include_regexp__.match(s) + if m: + ast.handleInclude(statements, fn, lineno, m, False) + return + + m = __require_regexp__.match(s) + if m: + ast.handleInclude(statements, fn, lineno, m, True) + return + + m = __export_regexp__.match(s) + if m: + ast.handleExport(statements, fn, lineno, m) + return + + raise ParseError("unparsed line: '%s'" % s, fn, lineno); + +# Add us to the handlers list +from bb.parse import handlers +handlers.append({'supports': supports, 'handle': handle, 'init': init}) +del handlers diff --git a/bitbake/lib/bb/parse/parse_py/__init__.py b/bitbake/lib/bb/parse/parse_py/__init__.py new file mode 100644 index 0000000000..3e658d0de9 --- /dev/null +++ b/bitbake/lib/bb/parse/parse_py/__init__.py 
@@ -0,0 +1,33 @@ +#!/usr/bin/env python +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +""" +BitBake Parsers + +File parsers for the BitBake build tools. + +""" + +# Copyright (C) 2003, 2004 Chris Larson +# Copyright (C) 2003, 2004 Phil Blundell +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. +# +# Based on functions from the base bb module, Copyright 2003 Holger Schurig + +from __future__ import absolute_import +from . import ConfHandler +from . import BBHandler + +__version__ = '1.0' diff --git a/bitbake/lib/bb/persist_data.py b/bitbake/lib/bb/persist_data.py new file mode 100644 index 0000000000..994e61b0a6 --- /dev/null +++ b/bitbake/lib/bb/persist_data.py @@ -0,0 +1,215 @@ +"""BitBake Persistent Data Store + +Used to store data in a central location such that other threads/tasks can +access them at some future date. Acts as a convenience wrapper around sqlite, +currently, providing a key/value store accessed by 'domain'. +""" + +# Copyright (C) 2007 Richard Purdie +# Copyright (C) 2010 Chris Larson +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. 
+# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +import collections +import logging +import os.path +import sys +import warnings +from bb.compat import total_ordering +from collections import Mapping + +try: + import sqlite3 +except ImportError: + from pysqlite2 import dbapi2 as sqlite3 + +sqlversion = sqlite3.sqlite_version_info +if sqlversion[0] < 3 or (sqlversion[0] == 3 and sqlversion[1] < 3): + raise Exception("sqlite3 version 3.3.0 or later is required.") + + +logger = logging.getLogger("BitBake.PersistData") +if hasattr(sqlite3, 'enable_shared_cache'): + try: + sqlite3.enable_shared_cache(True) + except sqlite3.OperationalError: + pass + + +@total_ordering +class SQLTable(collections.MutableMapping): + """Object representing a table/domain in the database""" + def __init__(self, cachefile, table): + self.cachefile = cachefile + self.table = table + self.cursor = connect(self.cachefile) + + self._execute("CREATE TABLE IF NOT EXISTS %s(key TEXT, value TEXT);" + % table) + + def _execute(self, *query): + """Execute a query, waiting to acquire a lock if necessary""" + count = 0 + while True: + try: + return self.cursor.execute(*query) + except sqlite3.OperationalError as exc: + if 'database is locked' in str(exc) and count < 500: + count = count + 1 + self.cursor.close() + self.cursor = connect(self.cachefile) + continue + raise + + def __enter__(self): + self.cursor.__enter__() + return self + + def __exit__(self, *excinfo): + self.cursor.__exit__(*excinfo) + + def __getitem__(self, key): + data = self._execute("SELECT * from %s where key=?;" % + 
self.table, [key]) + for row in data: + return row[1] + raise KeyError(key) + + def __delitem__(self, key): + if key not in self: + raise KeyError(key) + self._execute("DELETE from %s where key=?;" % self.table, [key]) + + def __setitem__(self, key, value): + if not isinstance(key, basestring): + raise TypeError('Only string keys are supported') + elif not isinstance(value, basestring): + raise TypeError('Only string values are supported') + + data = self._execute("SELECT * from %s where key=?;" % + self.table, [key]) + exists = len(list(data)) + if exists: + self._execute("UPDATE %s SET value=? WHERE key=?;" % self.table, + [value, key]) + else: + self._execute("INSERT into %s(key, value) values (?, ?);" % + self.table, [key, value]) + + def __contains__(self, key): + return key in set(self) + + def __len__(self): + data = self._execute("SELECT COUNT(key) FROM %s;" % self.table) + for row in data: + return row[0] + + def __iter__(self): + data = self._execute("SELECT key FROM %s;" % self.table) + return (row[0] for row in data) + + def __lt__(self, other): + if not isinstance(other, Mapping): + raise NotImplemented + + return len(self) < len(other) + + def get_by_pattern(self, pattern): + data = self._execute("SELECT * FROM %s WHERE key LIKE ?;" % + self.table, [pattern]) + return [row[1] for row in data] + + def values(self): + return list(self.itervalues()) + + def itervalues(self): + data = self._execute("SELECT value FROM %s;" % self.table) + return (row[0] for row in data) + + def items(self): + return list(self.iteritems()) + + def iteritems(self): + return self._execute("SELECT * FROM %s;" % self.table) + + def clear(self): + self._execute("DELETE FROM %s;" % self.table) + + def has_key(self, key): + return key in self + + +class PersistData(object): + """Deprecated representation of the bitbake persistent data store""" + def __init__(self, d): + warnings.warn("Use of PersistData is deprecated. 
Please use " + "persist(domain, d) instead.", + category=DeprecationWarning, + stacklevel=2) + + self.data = persist(d) + logger.debug(1, "Using '%s' as the persistent data cache", + self.data.filename) + + def addDomain(self, domain): + """ + Add a domain (pending deprecation) + """ + return self.data[domain] + + def delDomain(self, domain): + """ + Removes a domain and all the data it contains + """ + del self.data[domain] + + def getKeyValues(self, domain): + """ + Return a list of key + value pairs for a domain + """ + return self.data[domain].items() + + def getValue(self, domain, key): + """ + Return the value of a key for a domain + """ + return self.data[domain][key] + + def setValue(self, domain, key, value): + """ + Sets the value of a key for a domain + """ + self.data[domain][key] = value + + def delValue(self, domain, key): + """ + Deletes a key/value pair + """ + del self.data[domain][key] + +def connect(database): + return sqlite3.connect(database, timeout=5, isolation_level=None) + +def persist(domain, d): + """Convenience factory for SQLTable objects based upon metadata""" + import bb.utils + cachedir = (d.getVar("PERSISTENT_DIR", True) or + d.getVar("CACHE", True)) + if not cachedir: + logger.critical("Please set the 'PERSISTENT_DIR' or 'CACHE' variable") + sys.exit(1) + + bb.utils.mkdirhier(cachedir) + cachefile = os.path.join(cachedir, "bb_persist_data.sqlite3") + return SQLTable(cachefile, domain) diff --git a/bitbake/lib/bb/process.py b/bitbake/lib/bb/process.py new file mode 100644 index 0000000000..8b1aea9a10 --- /dev/null +++ b/bitbake/lib/bb/process.py @@ -0,0 +1,133 @@ +import logging +import signal +import subprocess +import errno +import select + +logger = logging.getLogger('BitBake.Process') + +def subprocess_setup(): + # Python installs a SIGPIPE handler by default. This is usually not what + # non-Python subprocesses expect. 
+ signal.signal(signal.SIGPIPE, signal.SIG_DFL) + +class CmdError(RuntimeError): + def __init__(self, command, msg=None): + self.command = command + self.msg = msg + + def __str__(self): + if not isinstance(self.command, basestring): + cmd = subprocess.list2cmdline(self.command) + else: + cmd = self.command + + msg = "Execution of '%s' failed" % cmd + if self.msg: + msg += ': %s' % self.msg + return msg + +class NotFoundError(CmdError): + def __str__(self): + return CmdError.__str__(self) + ": command not found" + +class ExecutionError(CmdError): + def __init__(self, command, exitcode, stdout = None, stderr = None): + CmdError.__init__(self, command) + self.exitcode = exitcode + self.stdout = stdout + self.stderr = stderr + + def __str__(self): + message = "" + if self.stderr: + message += self.stderr + if self.stdout: + message += self.stdout + if message: + message = ":\n" + message + return (CmdError.__str__(self) + + " with exit code %s" % self.exitcode + message) + +class Popen(subprocess.Popen): + defaults = { + "close_fds": True, + "preexec_fn": subprocess_setup, + "stdout": subprocess.PIPE, + "stderr": subprocess.STDOUT, + "stdin": subprocess.PIPE, + "shell": False, + } + + def __init__(self, *args, **kwargs): + options = dict(self.defaults) + options.update(kwargs) + subprocess.Popen.__init__(self, *args, **options) + +def _logged_communicate(pipe, log, input): + if pipe.stdin: + if input is not None: + pipe.stdin.write(input) + pipe.stdin.close() + + outdata, errdata = [], [] + rin = [] + + if pipe.stdout is not None: + bb.utils.nonblockingfd(pipe.stdout.fileno()) + rin.append(pipe.stdout) + if pipe.stderr is not None: + bb.utils.nonblockingfd(pipe.stderr.fileno()) + rin.append(pipe.stderr) + + try: + while pipe.poll() is None: + rlist = rin + try: + r,w,e = select.select (rlist, [], [], 1) + except OSError as e: + if e.errno != errno.EINTR: + raise + + if pipe.stdout in r: + data = pipe.stdout.read() + if data is not None: + outdata.append(data) + 
log.write(data) + + if pipe.stderr in r: + data = pipe.stderr.read() + if data is not None: + errdata.append(data) + log.write(data) + finally: + log.flush() + if pipe.stdout is not None: + pipe.stdout.close() + if pipe.stderr is not None: + pipe.stderr.close() + return ''.join(outdata), ''.join(errdata) + +def run(cmd, input=None, log=None, **options): + """Convenience function to run a command and return its output, raising an + exception when the command fails""" + + if isinstance(cmd, basestring) and not "shell" in options: + options["shell"] = True + + try: + pipe = Popen(cmd, **options) + except OSError as exc: + if exc.errno == 2: + raise NotFoundError(cmd) + else: + raise CmdError(cmd, exc) + + if log: + stdout, stderr = _logged_communicate(pipe, log, input) + else: + stdout, stderr = pipe.communicate(input) + + if pipe.returncode != 0: + raise ExecutionError(cmd, pipe.returncode, stdout, stderr) + return stdout, stderr diff --git a/bitbake/lib/bb/providers.py b/bitbake/lib/bb/providers.py new file mode 100644 index 0000000000..637e1fab96 --- /dev/null +++ b/bitbake/lib/bb/providers.py @@ -0,0 +1,381 @@ +# ex:ts=4:sw=4:sts=4:et +# -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- +# +# Copyright (C) 2003, 2004 Chris Larson +# Copyright (C) 2003, 2004 Phil Blundell +# Copyright (C) 2003 - 2005 Michael 'Mickey' Lauer +# Copyright (C) 2005 Holger Hans Peter Freyther +# Copyright (C) 2005 ROAD GmbH +# Copyright (C) 2006 Richard Purdie +# +# This program is free software; you can redistribute it and/or modify +# it under the terms of the GNU General Public License version 2 as +# published by the Free Software Foundation. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the +# GNU General Public License for more details. 
+# +# You should have received a copy of the GNU General Public License along +# with this program; if not, write to the Free Software Foundation, Inc., +# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. + +import re +import logging +from bb import data, utils +from collections import defaultdict +import bb + +logger = logging.getLogger("BitBake.Provider") + +class NoProvider(bb.BBHandledException): + """Exception raised when no provider of a build dependency can be found""" + +class NoRProvider(bb.BBHandledException): + """Exception raised when no provider of a runtime dependency can be found""" + +class MultipleRProvider(bb.BBHandledException): + """Exception raised when multiple providers of a runtime dependency can be found""" + +def findProviders(cfgData, dataCache, pkg_pn = None): + """ + Convenience function to get latest and preferred providers in pkg_pn + """ + + if not pkg_pn: + pkg_pn = dataCache.pkg_pn + + # Need to ensure data store is expanded + localdata = data.createCopy(cfgData) + bb.data.update_data(localdata) + bb.data.expandKeys(localdata) + + preferred_versions = {} + latest_versions = {} + + for pn in pkg_pn: + (last_ver, last_file, pref_ver, pref_file) = findBestProvider(pn, localdata, dataCache, pkg_pn) + preferred_versions[pn] = (pref_ver, pref_file) + latest_versions[pn] = (last_ver, last_file) + + return (latest_versions, preferred_versions) + + +def allProviders(dataCache): + """ + Find all providers for each pn + """ + all_providers = defaultdict(list) + for (fn, pn) in dataCache.pkg_fn.items(): + ver = dataCache.pkg_pepvpr[fn] + all_providers[pn].append((ver, fn)) + return all_providers + + +def sortPriorities(pn, dataCache, pkg_pn = None): + """ + Reorder pkg_pn by file priority and default preference + """ + + if not pkg_pn: + pkg_pn = dataCache.pkg_pn + + files = pkg_pn[pn] + priorities = {} + for f in files: + priority = dataCache.bbfile_priority[f] + preference = dataCache.pkg_dp[f] + if priority not in priorities: + 
priorities[priority] = {} + if preference not in priorities[priority]: + priorities[priority][preference] = [] + priorities[priority][preference].append(f) + tmp_pn = [] + for pri in sorted(priorities): + tmp_pref = [] + for pref in sorted(priorities[pri]): + tmp_pref.extend(priorities[pri][pref]) + tmp_pn = [tmp_pref] + tmp_pn + + return tmp_pn + +def preferredVersionMatch(pe, pv, pr, preferred_e, preferred_v, preferred_r): + """ + Check if the version pe,pv,pr is the preferred one. + If there is preferred version defined and ends with '%', then pv has to start with that version after removing the '%' + """ + if (pr == preferred_r or preferred_r == None): + if (pe == preferred_e or preferred_e == None): + if preferred_v == pv: + return True + if preferred_v != None and preferred_v.endswith('%') and pv.startswith(preferred_v[:len(preferred_v)-1]): + return True + return False + +def findPreferredProvider(pn, cfgData, dataCache, pkg_pn = None, item = None): + """ + Find the first provider in pkg_pn with a PREFERRED_VERSION set. 
+ """ + + preferred_file = None + preferred_ver = None + + localdata = data.createCopy(cfgData) + localdata.setVar('OVERRIDES', "%s:pn-%s:%s" % (data.getVar('OVERRIDES', localdata), pn, pn)) + bb.data.update_data(localdata) + + preferred_v = localdata.getVar('PREFERRED_VERSION', True) + if preferred_v: + m = re.match('(\d+:)*(.*)(_.*)*', preferred_v) + if m: + if m.group(1): + preferred_e = m.group(1)[:-1] + else: + preferred_e = None + preferred_v = m.group(2) + if m.group(3): + preferred_r = m.group(3)[1:] + else: + preferred_r = None + else: + preferred_e = None + preferred_r = None + + for file_set in pkg_pn: + for f in file_set: + pe, pv, pr = dataCache.pkg_pepvpr[f] + if preferredVersionMatch(pe, pv, pr, preferred_e, preferred_v, preferred_r): + preferred_file = f + preferred_ver = (pe, pv, pr) + break + if preferred_file: + break; + if preferred_r: + pv_str = '%s-%s' % (preferred_v, preferred_r) + else: + pv_str = preferred_v + if not (preferred_e is None): + pv_str = '%s:%s' % (preferred_e, pv_str) + itemstr = "" + if item: + itemstr = " (for item %s)" % item + if preferred_file is None: + logger.info("preferred version %s of %s not available%s", pv_str, pn, itemstr) + available_vers = [] + for file_set in pkg_pn: + for f in file_set: + pe, pv, pr = dataCache.pkg_pepvpr[f] + ver_str = pv + if pe: + ver_str = "%s:%s" % (pe, ver_str) + if not ver_str in available_vers: + available_vers.append(ver_str) + if available_vers: + available_vers.sort() + logger.info("versions of %s available: %s", pn, ' '.join(available_vers)) + else: + logger.debug(1, "selecting %s as PREFERRED_VERSION %s of package %s%s", preferred_file, pv_str, pn, itemstr) + + return (preferred_ver, preferred_file) + + +def findLatestProvider(pn, cfgData, dataCache, file_set): + """ + Return the highest version of the providers in file_set. + Take default preferences into account. 
+ """ + latest = None + latest_p = 0 + latest_f = None + for file_name in file_set: + pe, pv, pr = dataCache.pkg_pepvpr[file_name] + dp = dataCache.pkg_dp[file_name] + + if (latest is None) or ((latest_p == dp) and (utils.vercmp(latest, (pe, pv, pr)) < 0)) or (dp > latest_p): + latest = (pe, pv, pr) + latest_f = file_name + latest_p = dp + + return (latest, latest_f) + + +def findBestProvider(pn, cfgData, dataCache, pkg_pn = None, item = None): + """ + If there is a PREFERRED_VERSION, find the highest-priority bbfile + providing that version. If not, find the latest version provided by + an bbfile in the highest-priority set. + """ + + sortpkg_pn = sortPriorities(pn, dataCache, pkg_pn) + # Find the highest priority provider with a PREFERRED_VERSION set + (preferred_ver, preferred_file) = findPreferredProvider(pn, cfgData, dataCache, sortpkg_pn, item) + # Find the latest version of the highest priority provider + (latest, latest_f) = findLatestProvider(pn, cfgData, dataCache, sortpkg_pn[0]) + + if preferred_file is None: + preferred_file = latest_f + preferred_ver = latest + + return (latest, latest_f, preferred_ver, preferred_file) + + +def _filterProviders(providers, item, cfgData, dataCache): + """ + Take a list of providers and filter/reorder according to the + environment variables and previous build results + """ + eligible = [] + preferred_versions = {} + sortpkg_pn = {} + + # The order of providers depends on the order of the files on the disk + # up to here. Sort pkg_pn to make dependency issues reproducible rather + # than effectively random. 
+ providers.sort() + + # Collate providers by PN + pkg_pn = {} + for p in providers: + pn = dataCache.pkg_fn[p] + if pn not in pkg_pn: + pkg_pn[pn] = [] + pkg_pn[pn].append(p) + + logger.debug(1, "providers for %s are: %s", item, pkg_pn.keys()) + + # First add PREFERRED_VERSIONS + for pn in pkg_pn: + sortpkg_pn[pn] = sortPriorities(pn, dataCache, pkg_pn) + preferred_versions[pn] = findPreferredProvider(pn, cfgData, dataCache, sortpkg_pn[pn], item) + if preferred_versions[pn][1]: + eligible.append(preferred_versions[pn][1]) + + # Now add latest versions + for pn in sortpkg_pn: + if pn in preferred_versions and preferred_versions[pn][1]: + continue + preferred_versions[pn] = findLatestProvider(pn, cfgData, dataCache, sortpkg_pn[pn][0]) + eligible.append(preferred_versions[pn][1]) + + if len(eligible) == 0: + logger.error("no eligible providers for %s", item) + return 0 + + # If pn == item, give it a slight default preference + # This means PREFERRED_PROVIDER_foobar defaults to foobar if available + for p in providers: + pn = dataCache.pkg_fn[p] + if pn != item: + continue + (newvers, fn) = preferred_versions[pn] + if not fn in eligible: + continue + eligible.remove(fn) + eligible = [fn] + eligible + + return eligible + + +def filterProviders(providers, item, cfgData, dataCache): + """ + Take a list of providers and filter/reorder according to the + environment variables and previous build results + Takes a "normal" target item + """ + + eligible = _filterProviders(providers, item, cfgData, dataCache) + + prefervar = cfgData.getVar('PREFERRED_PROVIDER_%s' % item, True) + if prefervar: + dataCache.preferred[item] = prefervar + + foundUnique = False + if item in dataCache.preferred: + for p in eligible: + pn = dataCache.pkg_fn[p] + if dataCache.preferred[item] == pn: + logger.verbose("selecting %s to satisfy %s due to PREFERRED_PROVIDERS", pn, item) + eligible.remove(p) + eligible = [p] + eligible + foundUnique = True + break + + logger.debug(1, "sorted providers for %s 
are: %s", item, eligible) + + return eligible, foundUnique + +def filterProvidersRunTime(providers, item, cfgData, dataCache): + """ + Take a list of providers and filter/reorder according to the + environment variables and previous build results + Takes a "runtime" target item + """ + + eligible = _filterProviders(providers, item, cfgData, dataCache) + + # Should use dataCache.preferred here? + preferred = [] + preferred_vars = [] + pns = {} + for p in eligible: + pns[dataCache.pkg_fn[p]] = p + for p in eligible: + pn = dataCache.pkg_fn[p] + provides = dataCache.pn_provides[pn] + for provide in provides: + prefervar = cfgData.getVar('PREFERRED_PROVIDER_%s' % provide, True) + #logger.debug(1, "checking PREFERRED_PROVIDER_%s (value %s) against %s", provide, prefervar, pns.keys()) + if prefervar in pns and pns[prefervar] not in preferred: + var = "PREFERRED_PROVIDER_%s = %s" % (provide, prefervar) + logger.verbose("selecting %s to satisfy runtime %s due to %s", prefervar, item, var) + preferred_vars.append(var) + pref = pns[prefervar] + eligible.remove(pref) + eligible = [pref] + eligible + preferred.append(pref) + break + + numberPreferred = len(preferred) + + if numberPreferred > 1: + logger.error("Trying to resolve runtime dependency %s resulted in conflicting PREFERRED_PROVIDER entries being found.\nThe providers found were: %s\nThe PREFERRED_PROVIDER entries resulting in this conflict were: %s", item, preferred, preferred_vars) + + logger.debug(1, "sorted runtime providers for %s are: %s", item, eligible) + + return eligible, numberPreferred + +regexp_cache = {} + +def getRuntimeProviders(dataCache, rdepend): + """ + Return any providers of runtime dependency + """ + rproviders = [] + + if rdepend in dataCache.rproviders: + rproviders += dataCache.rproviders[rdepend] + + if rdepend in dataCache.packages: + rproviders += dataCache.packages[rdepend] + + if rproviders: + return rproviders + + # Only search dynamic packages if we can't find anything in other 
variables + for pattern in dataCache.packages_dynamic: + pattern = pattern.replace('+', "\+") + if pattern in regexp_cache: + regexp = regexp_cache[pattern] + else: + try: + regexp = re.compile(pattern) + except: + logger.error("Error parsing regular expression '%s'", pattern) + raise + regexp_cache[pattern] = regexp + if regexp.match(rdepend): + rproviders += dataCache.packages_dynamic[pattern] + logger.debug(1, "Assuming %s is a dynamic package, but it may not exist" % rdepend) + + return rproviders diff --git a/bitbake/lib/bb/pysh/__init__.py b/bitbake/lib/bb/pysh/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/bitbake/lib/bb/pysh/builtin.py b/bitbake/lib/bb/pysh/builtin.py new file mode 100644 index 0000000000..b748e4a4f2 --- /dev/null +++ b/bitbake/lib/bb/pysh/builtin.py @@ -0,0 +1,710 @@ +# builtin.py - builtins and utilities definitions for pysh. +# +# Copyright 2007 Patrick Mezard +# +# This software may be used and distributed according to the terms +# of the GNU General Public License, incorporated herein by reference. + +"""Builtin and internal utilities implementations. + +- Beware not to use python interpreter environment as if it were the shell +environment. For instance, commands working directory must be explicitely handled +through env['PWD'] instead of relying on python working directory. 
+""" +import errno +import optparse +import os +import re +import subprocess +import sys +import time + +def has_subprocess_bug(): + return getattr(subprocess, 'list2cmdline') and \ + ( subprocess.list2cmdline(['']) == '' or \ + subprocess.list2cmdline(['foo|bar']) == 'foo|bar') + +# Detect python bug 1634343: "subprocess swallows empty arguments under win32" +# +# Also detect: "[ 1710802 ] subprocess must escape redirection characters under win32" +# +if has_subprocess_bug(): + import subprocess_fix + subprocess.list2cmdline = subprocess_fix.list2cmdline + +from sherrors import * + +class NonExitingParser(optparse.OptionParser): + """OptionParser default behaviour upon error is to print the error message and + exit. Raise a utility error instead. + """ + def error(self, msg): + raise UtilityError(msg) + +#------------------------------------------------------------------------------- +# set special builtin +#------------------------------------------------------------------------------- +OPT_SET = NonExitingParser(usage="set - set or unset options and positional parameters") +OPT_SET.add_option( '-f', action='store_true', dest='has_f', default=False, + help='The shell shall disable pathname expansion.') +OPT_SET.add_option('-e', action='store_true', dest='has_e', default=False, + help="""When this option is on, if a simple command fails for any of the \ + reasons listed in Consequences of Shell Errors or returns an exit status \ + value >0, and is not part of the compound list following a while, until, \ + or if keyword, and is not a part of an AND or OR list, and is not a \ + pipeline preceded by the ! reserved word, then the shell shall immediately \ + exit.""") +OPT_SET.add_option('-x', action='store_true', dest='has_x', default=False, + help="""The shell shall write to standard error a trace for each command \ + after it expands the command and before it executes it. 
It is unspecified \ + whether the command that turns tracing off is traced.""") + +def builtin_set(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + + option, args = OPT_SET.parse_args(args) + env = interp.get_env() + + if option.has_f: + env.set_opt('-f') + if option.has_e: + env.set_opt('-e') + if option.has_x: + env.set_opt('-x') + return 0 + +#------------------------------------------------------------------------------- +# shift special builtin +#------------------------------------------------------------------------------- +def builtin_shift(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + + params = interp.get_env().get_positional_args() + if args: + try: + n = int(args[0]) + if n > len(params): + raise ValueError() + except ValueError: + return 1 + else: + n = 1 + + params[:n] = [] + interp.get_env().set_positional_args(params) + return 0 + +#------------------------------------------------------------------------------- +# export special builtin +#------------------------------------------------------------------------------- +OPT_EXPORT = NonExitingParser(usage="set - set or unset options and positional parameters") +OPT_EXPORT.add_option('-p', action='store_true', dest='has_p', default=False) + +def builtin_export(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + + option, args = OPT_EXPORT.parse_args(args) + if option.has_p: + raise NotImplementedError() + + for arg in args: + try: + name, value = arg.split('=', 1) + except ValueError: + name, value = arg, None + env = interp.get_env().export(name, value) + + return 0 + +#------------------------------------------------------------------------------- 
+# return special builtin +#------------------------------------------------------------------------------- +def builtin_return(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + res = 0 + if args: + try: + res = int(args[0]) + except ValueError: + res = 0 + if not 0<=res<=255: + res = 0 + + # BUG: should be last executed command exit code + raise ReturnSignal(res) + +#------------------------------------------------------------------------------- +# trap special builtin +#------------------------------------------------------------------------------- +def builtin_trap(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + if len(args) < 2: + stderr.write('trap: usage: trap [[arg] signal_spec ...]\n') + return 2 + + action = args[0] + for sig in args[1:]: + try: + env.traps[sig] = action + except Exception as e: + stderr.write('trap: %s\n' % str(e)) + return 0 + +#------------------------------------------------------------------------------- +# unset special builtin +#------------------------------------------------------------------------------- +OPT_UNSET = NonExitingParser("unset - unset values and attributes of variables and functions") +OPT_UNSET.add_option( '-f', action='store_true', dest='has_f', default=False) +OPT_UNSET.add_option( '-v', action='store_true', dest='has_v', default=False) + +def builtin_unset(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + + option, args = OPT_UNSET.parse_args(args) + + status = 0 + env = interp.get_env() + for arg in args: + try: + if option.has_f: + env.remove_function(arg) + else: + del env[arg] + except KeyError: + pass + except VarAssignmentError: + status = 1 + + return 
status + +#------------------------------------------------------------------------------- +# wait special builtin +#------------------------------------------------------------------------------- +def builtin_wait(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + + return interp.wait([int(arg) for arg in args]) + +#------------------------------------------------------------------------------- +# cat utility +#------------------------------------------------------------------------------- +def utility_cat(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + + if not args: + args = ['-'] + + status = 0 + for arg in args: + if arg == '-': + data = stdin.read() + else: + path = os.path.join(env['PWD'], arg) + try: + f = file(path, 'rb') + try: + data = f.read() + finally: + f.close() + except IOError as e: + if e.errno != errno.ENOENT: + raise + status = 1 + continue + stdout.write(data) + stdout.flush() + return status + +#------------------------------------------------------------------------------- +# cd utility +#------------------------------------------------------------------------------- +OPT_CD = NonExitingParser("cd - change the working directory") + +def utility_cd(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + + option, args = OPT_CD.parse_args(args) + env = interp.get_env() + + directory = None + printdir = False + if not args: + home = env.get('HOME') + if home: + # Unspecified, do nothing + return 0 + else: + directory = home + elif len(args)==1: + directory = args[0] + if directory=='-': + if 'OLDPWD' not in env: + raise UtilityError("OLDPWD not set") + printdir = True + directory = env['OLDPWD'] + 
else: + raise UtilityError("too many arguments") + + curpath = None + # Absolute directories will be handled correctly by the os.path.join call. + if not directory.startswith('.') and not directory.startswith('..'): + cdpaths = env.get('CDPATH', '.').split(';') + for cdpath in cdpaths: + p = os.path.join(cdpath, directory) + if os.path.isdir(p): + curpath = p + break + + if curpath is None: + curpath = directory + curpath = os.path.join(env['PWD'], directory) + + env['OLDPWD'] = env['PWD'] + env['PWD'] = curpath + if printdir: + stdout.write('%s\n' % curpath) + return 0 + +#------------------------------------------------------------------------------- +# colon utility +#------------------------------------------------------------------------------- +def utility_colon(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + return 0 + +#------------------------------------------------------------------------------- +# echo utility +#------------------------------------------------------------------------------- +def utility_echo(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + + # Echo only takes arguments, no options. Use printf if you need fancy stuff. + output = ' '.join(args) + '\n' + stdout.write(output) + stdout.flush() + return 0 + +#------------------------------------------------------------------------------- +# egrep utility +#------------------------------------------------------------------------------- +# egrep is usually a shell script. +# Unfortunately, pysh does not support shell scripts *with arguments* right now, +# so the redirection is implemented here, assuming grep is available. 
+def utility_egrep(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + + return run_command('grep', ['-E'] + args, interp, env, stdin, stdout, + stderr, debugflags) + +#------------------------------------------------------------------------------- +# env utility +#------------------------------------------------------------------------------- +def utility_env(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + + if args and args[0]=='-i': + raise NotImplementedError('env: -i option is not implemented') + + i = 0 + for arg in args: + if '=' not in arg: + break + # Update the current environment + name, value = arg.split('=', 1) + env[name] = value + i += 1 + + if args[i:]: + # Find then execute the specified interpreter + utility = env.find_in_path(args[i]) + if not utility: + return 127 + args[i:i+1] = utility + name = args[i] + args = args[i+1:] + try: + return run_command(name, args, interp, env, stdin, stdout, stderr, + debugflags) + except UtilityError: + stderr.write('env: failed to execute %s' % ' '.join([name]+args)) + return 126 + else: + for pair in env.get_variables().iteritems(): + stdout.write('%s=%s\n' % pair) + return 0 + +#------------------------------------------------------------------------------- +# exit utility +#------------------------------------------------------------------------------- +def utility_exit(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + + res = None + if args: + try: + res = int(args[0]) + except ValueError: + res = None + if not 0<=res<=255: + res = None + + if res is None: + # BUG: should be last executed command exit code + res = 0 + + raise ExitSignal(res) + 
+#------------------------------------------------------------------------------- +# fgrep utility +#------------------------------------------------------------------------------- +# see egrep +def utility_fgrep(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + + return run_command('grep', ['-F'] + args, interp, env, stdin, stdout, + stderr, debugflags) + +#------------------------------------------------------------------------------- +# gunzip utility +#------------------------------------------------------------------------------- +# see egrep +def utility_gunzip(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + + return run_command('gzip', ['-d'] + args, interp, env, stdin, stdout, + stderr, debugflags) + +#------------------------------------------------------------------------------- +# kill utility +#------------------------------------------------------------------------------- +def utility_kill(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + + for arg in args: + pid = int(arg) + status = subprocess.call(['pskill', '/T', str(pid)], + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.PIPE) + # pskill is asynchronous, hence the stupid polling loop + while 1: + p = subprocess.Popen(['pslist', str(pid)], + shell=True, + stdout=subprocess.PIPE, + stderr=subprocess.STDOUT) + output = p.communicate()[0] + if ('process %d was not' % pid) in output: + break + time.sleep(1) + return status + +#------------------------------------------------------------------------------- +# mkdir utility +#------------------------------------------------------------------------------- +OPT_MKDIR = NonExitingParser("mkdir 
- make directories.") +OPT_MKDIR.add_option('-p', action='store_true', dest='has_p', default=False) + +def utility_mkdir(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + + # TODO: implement umask + # TODO: implement proper utility error report + option, args = OPT_MKDIR.parse_args(args) + for arg in args: + path = os.path.join(env['PWD'], arg) + if option.has_p: + try: + os.makedirs(path) + except IOError as e: + if e.errno != errno.EEXIST: + raise + else: + os.mkdir(path) + return 0 + +#------------------------------------------------------------------------------- +# netstat utility +#------------------------------------------------------------------------------- +def utility_netstat(name, args, interp, env, stdin, stdout, stderr, debugflags): + # Do you really expect me to implement netstat ? + # This empty form is enough for Mercurial tests since it's + # supposed to generate nothing upon success. Faking this test + # is not a big deal either. + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + return 0 + +#------------------------------------------------------------------------------- +# pwd utility +#------------------------------------------------------------------------------- +OPT_PWD = NonExitingParser("pwd - return working directory name") +OPT_PWD.add_option('-L', action='store_true', dest='has_L', default=True, + help="""If the PWD environment variable contains an absolute pathname of \ + the current directory that does not contain the filenames dot or dot-dot, \ + pwd shall write this pathname to standard output. 
Otherwise, the -L option \ + shall behave as the -P option.""") +OPT_PWD.add_option('-P', action='store_true', dest='has_L', default=False, + help="""The absolute pathname written shall not contain filenames that, in \ + the context of the pathname, refer to files of type symbolic link.""") + +def utility_pwd(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + + option, args = OPT_PWD.parse_args(args) + stdout.write('%s\n' % env['PWD']) + return 0 + +#------------------------------------------------------------------------------- +# printf utility +#------------------------------------------------------------------------------- +RE_UNESCAPE = re.compile(r'(\\x[a-zA-Z0-9]{2}|\\[0-7]{1,3}|\\.)') + +def utility_printf(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + + def replace(m): + assert m.group() + g = m.group()[1:] + if g.startswith('x'): + return chr(int(g[1:], 16)) + if len(g) <= 3 and len([c for c in g if c in '01234567']) == len(g): + # Yay, an octal number + return chr(int(g, 8)) + return { + 'a': '\a', + 'b': '\b', + 'f': '\f', + 'n': '\n', + 'r': '\r', + 't': '\t', + 'v': '\v', + '\\': '\\', + }.get(g) + + # Convert escape sequences + format = re.sub(RE_UNESCAPE, replace, args[0]) + stdout.write(format % tuple(args[1:])) + return 0 + +#------------------------------------------------------------------------------- +# true utility +#------------------------------------------------------------------------------- +def utility_true(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + return 0 + +#------------------------------------------------------------------------------- +# sed utility 
+#------------------------------------------------------------------------------- +RE_SED = re.compile(r'^s(.).*\1[a-zA-Z]*$') + +# cygwin sed fails with some expressions when they do not end with a single space. +# see unit tests for details. Interestingly, the same expressions works perfectly +# in cygwin shell. +def utility_sed(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + + # Scan pattern arguments and append a space if necessary + for i in xrange(len(args)): + if not RE_SED.search(args[i]): + continue + args[i] = args[i] + ' ' + + return run_command(name, args, interp, env, stdin, stdout, + stderr, debugflags) + +#------------------------------------------------------------------------------- +# sleep utility +#------------------------------------------------------------------------------- +def utility_sleep(name, args, interp, env, stdin, stdout, stderr, debugflags): + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + time.sleep(int(args[0])) + return 0 + +#------------------------------------------------------------------------------- +# sort utility +#------------------------------------------------------------------------------- +OPT_SORT = NonExitingParser("sort - sort, merge, or sequence check text files") + +def utility_sort(name, args, interp, env, stdin, stdout, stderr, debugflags): + + def sort(path): + if path == '-': + lines = stdin.readlines() + else: + try: + f = file(path) + try: + lines = f.readlines() + finally: + f.close() + except IOError as e: + stderr.write(str(e) + '\n') + return 1 + + if lines and lines[-1][-1]!='\n': + lines[-1] = lines[-1] + '\n' + return lines + + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + + option, args = OPT_SORT.parse_args(args) + alllines = [] + + if len(args)<=0: + args += 
['-'] + + # Load all files lines + curdir = os.getcwd() + try: + os.chdir(env['PWD']) + for path in args: + alllines += sort(path) + finally: + os.chdir(curdir) + + alllines.sort() + for line in alllines: + stdout.write(line) + return 0 + +#------------------------------------------------------------------------------- +# hg utility +#------------------------------------------------------------------------------- + +hgcommands = [ + 'add', + 'addremove', + 'commit', 'ci', + 'debugrename', + 'debugwalk', + 'falabala', # Dummy command used in a mercurial test + 'incoming', + 'locate', + 'pull', + 'push', + 'qinit', + 'remove', 'rm', + 'rename', 'mv', + 'revert', + 'showconfig', + 'status', 'st', + 'strip', + ] + +def rewriteslashes(name, args): + # Several hg commands output file paths, rewrite the separators + if len(args) > 1 and name.lower().endswith('python') \ + and args[0].endswith('hg'): + for cmd in hgcommands: + if cmd in args[1:]: + return True + + # svn output contains many paths with OS specific separators. + # Normalize these to unix paths. 
+ base = os.path.basename(name) + if base.startswith('svn'): + return True + + return False + +def rewritehg(output): + if not output: + return output + # Rewrite os specific messages + output = output.replace(': The system cannot find the file specified', + ': No such file or directory') + output = re.sub(': Access is denied.*$', ': Permission denied', output) + output = output.replace(': No connection could be made because the target machine actively refused it', + ': Connection refused') + return output + + +def run_command(name, args, interp, env, stdin, stdout, + stderr, debugflags): + # Execute the command + if 'debug-utility' in debugflags: + print interp.log(' '.join([name, str(args), interp['PWD']]) + '\n') + + hgbin = interp.options().hgbinary + ishg = hgbin and ('hg' in name or args and 'hg' in args[0]) + unixoutput = 'cygwin' in name or ishg + + exec_env = env.get_variables() + try: + # BUG: comparing file descriptor is clearly not a reliable way to tell + # whether they point on the same underlying object. But in pysh limited + # scope this is usually right, we do not expect complicated redirections + # besides usual 2>&1. + # Still there is one case we have but cannot deal with is when stdout + # and stderr are redirected *by pysh caller*. This the reason for the + # --redirect pysh() option. + # Now, we want to know they are the same because we sometimes need to + # transform the command output, mostly remove CR-LF to ensure that + # command output is unix-like. Cygwin utilies are a special case because + # they explicitely set their output streams to binary mode, so we have + # nothing to do. For all others commands, we have to guess whether they + # are sending text data, in which case the transformation must be done. + # Again, the NUL character test is unreliable but should be enough for + # hg tests. 
+ redirected = stdout.fileno()==stderr.fileno() + if not redirected: + p = subprocess.Popen([name] + args, cwd=env['PWD'], env=exec_env, + stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.PIPE) + else: + p = subprocess.Popen([name] + args, cwd=env['PWD'], env=exec_env, + stdin=stdin, stdout=subprocess.PIPE, stderr=subprocess.STDOUT) + out, err = p.communicate() + except WindowsError as e: + raise UtilityError(str(e)) + + if not unixoutput: + def encode(s): + if '\0' in s: + return s + return s.replace('\r\n', '\n') + else: + encode = lambda s: s + + if rewriteslashes(name, args): + encode1_ = encode + def encode(s): + s = encode1_(s) + s = s.replace('\\\\', '\\') + s = s.replace('\\', '/') + return s + + if ishg: + encode2_ = encode + def encode(s): + return rewritehg(encode2_(s)) + + stdout.write(encode(out)) + if not redirected: + stderr.write(encode(err)) + return p.returncode + diff --git a/bitbake/lib/bb/pysh/interp.py b/bitbake/lib/bb/pysh/interp.py new file mode 100644 index 0000000000..25d8c92ec4 --- /dev/null +++ b/bitbake/lib/bb/pysh/interp.py @@ -0,0 +1,1367 @@ +# interp.py - shell interpreter for pysh. +# +# Copyright 2007 Patrick Mezard +# +# This software may be used and distributed according to the terms +# of the GNU General Public License, incorporated herein by reference. + +"""Implement the shell interpreter. + +Most references are made to "The Open Group Base Specifications Issue 6". + +""" +# TODO: document the fact input streams must implement fileno() so Popen will work correctly. +# it requires non-stdin stream to be implemented as files. Still to be tested... +# DOC: pathsep is used in PATH instead of ':'. Clearly, there are path syntax issues here. +# TODO: stop command execution upon error. +# TODO: sort out the filename/io_number mess. It should be possible to use filenames only. 
+# TODO: review subshell implementation +# TODO: test environment cloning for non-special builtins +# TODO: set -x should not rebuild commands from tokens, assignments/redirections are lost +# TODO: unit test for variable assignment +# TODO: test error management wrt error type/utility type +# TODO: test for binary output everywhere +# BUG: debug-parsing does not pass log file to PLY. Maybe a PLY upgrade is necessary. +import base64 +import cPickle as pickle +import errno +import glob +import os +import re +import subprocess +import sys +import tempfile + +try: + s = set() + del s +except NameError: + from Set import Set as set + +import builtin +from sherrors import * +import pyshlex +import pyshyacc + +def mappend(func, *args, **kargs): + """Like map but assume func returns a list. Returned lists are merged into + a single one. + """ + return reduce(lambda a,b: a+b, map(func, *args, **kargs), []) + +class FileWrapper: + """File object wrapper to ease debugging. + + Allow mode checking and implement file duplication through a simple + reference counting scheme. Not sure the latter is really useful since + only real file descriptors can be used. 
+ """ + def __init__(self, mode, file, close=True): + if mode not in ('r', 'w', 'a'): + raise IOError('invalid mode: %s' % mode) + self._mode = mode + self._close = close + if isinstance(file, FileWrapper): + if file._refcount[0] <= 0: + raise IOError(0, 'Error') + self._refcount = file._refcount + self._refcount[0] += 1 + self._file = file._file + else: + self._refcount = [1] + self._file = file + + def dup(self): + return FileWrapper(self._mode, self, self._close) + + def fileno(self): + """fileno() should be only necessary for input streams.""" + return self._file.fileno() + + def read(self, size=-1): + if self._mode!='r': + raise IOError(0, 'Error') + return self._file.read(size) + + def readlines(self, *args, **kwargs): + return self._file.readlines(*args, **kwargs) + + def write(self, s): + if self._mode not in ('w', 'a'): + raise IOError(0, 'Error') + return self._file.write(s) + + def flush(self): + self._file.flush() + + def close(self): + if not self._refcount: + return + assert self._refcount[0] > 0 + + self._refcount[0] -= 1 + if self._refcount[0] == 0: + self._mode = 'c' + if self._close: + self._file.close() + self._refcount = None + + def mode(self): + return self._mode + + def __getattr__(self, name): + if name == 'name': + self.name = getattr(self._file, name) + return self.name + else: + raise AttributeError(name) + + def __del__(self): + self.close() + + +def win32_open_devnull(mode): + return open('NUL', mode) + + +class Redirections: + """Stores open files and their mapping to pseudo-sh file descriptor. 
+ """ + # BUG: redirections are not handled correctly: 1>&3 2>&3 3>&4 does + # not make 1 to redirect to 4 + def __init__(self, stdin=None, stdout=None, stderr=None): + self._descriptors = {} + if stdin is not None: + self._add_descriptor(0, stdin) + if stdout is not None: + self._add_descriptor(1, stdout) + if stderr is not None: + self._add_descriptor(2, stderr) + + def add_here_document(self, interp, name, content, io_number=None): + if io_number is None: + io_number = 0 + + if name==pyshlex.unquote_wordtree(name): + content = interp.expand_here_document(('TOKEN', content)) + + # Write document content in a temporary file + tmp = tempfile.TemporaryFile() + try: + tmp.write(content) + tmp.flush() + tmp.seek(0) + self._add_descriptor(io_number, FileWrapper('r', tmp)) + except: + tmp.close() + raise + + def add(self, interp, op, filename, io_number=None): + if op not in ('<', '>', '>|', '>>', '>&'): + # TODO: add descriptor duplication and here_documents + raise RedirectionError('Unsupported redirection operator "%s"' % op) + + if io_number is not None: + io_number = int(io_number) + + if (op == '>&' and filename.isdigit()) or filename=='-': + # No expansion for file descriptors, quote them if you want a filename + fullname = filename + else: + if filename.startswith('/'): + # TODO: win32 kludge + if filename=='/dev/null': + fullname = 'NUL' + else: + # TODO: handle absolute pathnames, they are unlikely to exist on the + # current platform (win32 for instance). 
            raise NotImplementedError()
        else:
            # Expand the redirection target word (variable/command expansion).
            fullname = interp.expand_redirection(('TOKEN', filename))
            if not fullname:
                raise RedirectionError('%s: ambiguous redirect' % filename)
            # Build absolute path based on PWD
            fullname = os.path.join(interp.get_env()['PWD'], fullname)

        # Dispatch on the redirection operator.
        if op=='<':
            return self._add_input_redirection(interp, fullname, io_number)
        elif op in ('>', '>|'):
            clobber = ('>|'==op)
            return self._add_output_redirection(interp, fullname, io_number, clobber)
        elif op=='>>':
            return self._add_output_appending(interp, fullname, io_number)
        elif op=='>&':
            # NOTE(review): for '>&' the "filename" is actually a descriptor
            # number (or '-'), per POSIX 2.7.6 — hence the different call shape.
            return self._dup_output_descriptor(fullname, io_number)

    def close(self):
        """Flush and close every registered descriptor; idempotent."""
        if self._descriptors is not None:
            # NOTE(review): itervalues() is Python-2-only.
            for desc in self._descriptors.itervalues():
                desc.flush()
                desc.close()
            self._descriptors = None

    def stdin(self):
        # Descriptor 0 is standard input.
        return self._descriptors[0]

    def stdout(self):
        # Descriptor 1 is standard output.
        return self._descriptors[1]

    def stderr(self):
        # Descriptor 2 is standard error.
        return self._descriptors[2]

    def clone(self):
        """Return a Redirections holding dup()ed copies of every descriptor."""
        clone = Redirections()
        for desc, fileobj in self._descriptors.iteritems():
            clone._descriptors[desc] = fileobj.dup()
        return clone

    def _add_output_redirection(self, interp, filename, io_number, clobber):
        """Register 'filename' opened for writing on 'io_number' (default 1)."""
        if io_number is None:
            # io_number defaults to standard output
            io_number = 1

        if not clobber and interp.get_env().has_opt('-C') and os.path.isfile(filename):
            # File already exists in no-clobber mode (set -C), bail out
            raise RedirectionError('File "%s" already exists' % filename)

        # Open and register
        self._add_file_descriptor(io_number, filename, 'w')

    def _add_output_appending(self, interp, filename, io_number):
        """Register 'filename' opened for appending on 'io_number' (default 1)."""
        if io_number is None:
            io_number = 1
        self._add_file_descriptor(io_number, filename, 'a')

    def _add_input_redirection(self, interp, filename, io_number):
        """Register 'filename' opened for reading on 'io_number' (default 0)."""
        if io_number is None:
            io_number = 0
        self._add_file_descriptor(io_number, filename, 'r')

    def _add_file_descriptor(self, io_number, filename, mode):
        """Open 'filename' in binary 'mode' and register it under 'io_number'.

        Raises RedirectionError on I/O failure. On any later failure the
        half-constructed file/wrapper is closed before re-raising.
        """
        try:
            if filename.startswith('/'):
                if filename=='/dev/null':
                    # Emulated null device (win32 has no /dev/null).
                    f = win32_open_devnull(mode+'b')
                else:
                    # TODO: handle absolute pathnames, they are unlikely to exist on the
                    # current platform (win32 for instance).
                    raise NotImplementedError('cannot open absolute path %s' % repr(filename))
            else:
                # NOTE(review): file() is Python-2-only; open() is the portable spelling.
                f = file(filename, mode+'b')
        except IOError as e:
            raise RedirectionError(str(e))

        wrapper = None
        try:
            wrapper = FileWrapper(mode, f)
            f = None    # ownership transferred to wrapper
            self._add_descriptor(io_number, wrapper)
        except:
            # Best-effort cleanup; bare except is deliberate (re-raises below).
            if f: f.close()
            if wrapper: wrapper.close()
            raise

    def _dup_output_descriptor(self, source_fd, dest_fd):
        """Implement '>&': duplicate output descriptor source_fd onto dest_fd."""
        if source_fd is None:
            source_fd = 1
        self._dup_file_descriptor(source_fd, dest_fd, 'w')

    def _dup_file_descriptor(self, source_fd, dest_fd, mode):
        """Duplicate source_fd onto dest_fd; dest_fd == '-' closes source_fd.

        Both descriptors must already exist and be open in 'mode'.
        """
        source_fd = int(source_fd)
        if source_fd not in self._descriptors:
            raise RedirectionError('"%s" is not a valid file descriptor' % str(source_fd))
        source = self._descriptors[source_fd]

        if source.mode()!=mode:
            raise RedirectionError('Descriptor %s cannot be duplicated in mode "%s"' % (str(source), mode))

        if dest_fd=='-':
            # Close the source descriptor
            del self._descriptors[source_fd]
            source.close()
        else:
            dest_fd = int(dest_fd)
            if dest_fd not in self._descriptors:
                raise RedirectionError('Cannot replace file descriptor %s' % str(dest_fd))

            dest = self._descriptors[dest_fd]
            if dest.mode()!=mode:
                # NOTE(review): message has a doubled "cannot be" — candidate fix.
                raise RedirectionError('Descriptor %s cannot be cannot be redirected in mode "%s"' % (str(dest), mode))

            # Replace dest with a duplicate of source, then close the old dest.
            self._descriptors[dest_fd] = source.dup()
            dest.close()

    def _add_descriptor(self, io_number, file):
        """Register 'file' under 'io_number', closing any previous occupant."""
        io_number = int(io_number)

        if io_number in self._descriptors:
            # Close the current descriptor
            d = self._descriptors[io_number]
            del self._descriptors[io_number]
            d.close()

        self._descriptors[io_number] = file

    def __str__(self):
        names = [('%d=%r' % (k, getattr(v, 'name', None))) for k,v
                 in self._descriptors.iteritems()]
        names = ','.join(names)
        return 'Redirections(%s)' % names

    def __del__(self):
        # Destructor delegates to close(); close() is idempotent.
        self.close()

def cygwin_to_windows_path(path):
    """Turn /cygdrive/c/foo into c:/foo, or return path if it
    is not a cygwin path.
    """
    if not path.startswith('/cygdrive/'):
        return path
    path = path[len('/cygdrive/'):]
    # First component is the drive letter: 'c/foo' -> 'c:/foo'.
    path = path[:1] + ':' + path[1:]
    return path

def win32_to_unix_path(path):
    """Replace backslashes with forward slashes; None passes through."""
    if path is not None:
        path = path.replace('\\', '/')
    return path

# Shebang line: optional single space, interpreter path, optional one argument.
_RE_SHEBANG = re.compile(r'^\#!\s?([^\s]+)(?:\s([^\s]+))?')
# Map of recognized interpreter paths to the command used to run them.
_SHEBANG_CMDS = {
    '/usr/bin/env': 'env',
    '/bin/sh': 'pysh',
    'python': 'python',
}

def resolve_shebang(path, ignoreshell=False):
    """Return a list of arguments as shebang interpreter call or an empty list
    if path does not refer to an executable script.
    See .

    ignoreshell - set to True to ignore sh shebangs. Return an empty list instead.
    """
    # NOTE(review): the "See ." above is a truncated reference in the original;
    # presumably it pointed at the POSIX shebang/exec description — confirm.
    try:
        # NOTE(review): file() is Python-2-only; open() is the portable spelling.
        f = file(path)
        try:
            # At most 80 characters in the first line
            header = f.read(80).splitlines()[0]
        finally:
            f.close()

        m = _RE_SHEBANG.search(header)
        if not m:
            # No shebang: not an interpretable script.
            return []
        cmd, arg = m.group(1,2)
        if os.path.isfile(cmd):
            # Keep this one, the hg script for instance contains a weird windows
            # shebang referencing the current python install.
            cmdfile = os.path.basename(cmd).lower()
            if cmdfile == 'python.exe':
                cmd = 'python'
            pass
        elif cmd not in _SHEBANG_CMDS:
            raise CommandNotFound('Unknown interpreter "%s" referenced in '\
                'shebang' % header)
        # Translate the interpreter path to its command name; unknown paths
        # (existing files not named python.exe) map to None and yield [].
        cmd = _SHEBANG_CMDS.get(cmd)
        if cmd is None or (ignoreshell and cmd == 'pysh'):
            return []
        if arg is None:
            return [cmd, win32_to_unix_path(path)]
        return [cmd, arg, win32_to_unix_path(path)]
    except IOError as e:
        # Missing file, or EPERM from opening a directory, means "no script".
        if e.errno!=errno.ENOENT and \
            (e.errno!=errno.EPERM and not os.path.isdir(path)): # Opening a directory raises EPERM
            raise
        return []

def win32_find_in_path(name, path):
    """Search 'path' (string or list) for 'name', trying PATHEXT extensions.

    Returns a shebang-style argument list, a single-element path list, or [].
    """
    if isinstance(path, str):
        path = path.split(os.pathsep)

    exts = os.environ.get('PATHEXT', '').lower().split(os.pathsep)
    for p in path:
        p_name = os.path.join(p, name)

        # An extension-less match must be a script with a shebang.
        prefix = resolve_shebang(p_name)
        if prefix:
            return prefix

        for ext in exts:
            p_name_ext = p_name + ext
            if os.path.exists(p_name_ext):
                return [win32_to_unix_path(p_name_ext)]
    return []

class Traps(dict):
    # Trap table; only the EXIT condition is supported so far.
    def __setitem__(self, key, value):
        if key not in ('EXIT',):
            raise NotImplementedError()
        super(Traps, self).__setitem__(key, value)

# IFS white spaces character class
_IFS_WHITESPACES = (' ', '\t', '\n')

class Environment:
    """Environment holds environment variables, export table, function
    definitions and whatever is defined in 2.12 "Shell Execution Environment",
    redirection excepted.
+ """ + def __init__(self, pwd): + self._opt = set() #Shell options + + self._functions = {} + self._env = {'?': '0', '#': '0'} + self._exported = set([ + 'HOME', 'IFS', 'PATH' + ]) + + # Set environment vars with side-effects + self._ifs_ws = None # Set of IFS whitespace characters + self._ifs_re = None # Regular expression used to split between words using IFS classes + self['IFS'] = ''.join(_IFS_WHITESPACES) #Default environment values + self['PWD'] = pwd + self.traps = Traps() + + def clone(self, subshell=False): + env = Environment(self['PWD']) + env._opt = set(self._opt) + for k,v in self.get_variables().iteritems(): + if k in self._exported: + env.export(k,v) + elif subshell: + env[k] = v + + if subshell: + env._functions = dict(self._functions) + + return env + + def __getitem__(self, key): + if key in ('@', '*', '-', '$'): + raise NotImplementedError('%s is not implemented' % repr(key)) + return self._env[key] + + def get(self, key, defval=None): + try: + return self[key] + except KeyError: + return defval + + def __setitem__(self, key, value): + if key=='IFS': + # Update the whitespace/non-whitespace classes + self._update_ifs(value) + elif key=='PWD': + pwd = os.path.abspath(value) + if not os.path.isdir(pwd): + raise VarAssignmentError('Invalid directory %s' % value) + value = pwd + elif key in ('?', '!'): + value = str(int(value)) + self._env[key] = value + + def __delitem__(self, key): + if key in ('IFS', 'PWD', '?'): + raise VarAssignmentError('%s cannot be unset' % key) + del self._env[key] + + def __contains__(self, item): + return item in self._env + + def set_positional_args(self, args): + """Set the content of 'args' as positional argument from 1 to len(args). + Return previous argument as a list of strings. 
+ """ + # Save and remove previous arguments + prevargs = [] + for i in xrange(int(self._env['#'])): + i = str(i+1) + prevargs.append(self._env[i]) + del self._env[i] + self._env['#'] = '0' + + #Set new ones + for i,arg in enumerate(args): + self._env[str(i+1)] = str(arg) + self._env['#'] = str(len(args)) + + return prevargs + + def get_positional_args(self): + return [self._env[str(i+1)] for i in xrange(int(self._env['#']))] + + def get_variables(self): + return dict(self._env) + + def export(self, key, value=None): + if value is not None: + self[key] = value + self._exported.add(key) + + def get_exported(self): + return [(k,self._env.get(k)) for k in self._exported] + + def split_fields(self, word): + if not self._ifs_ws or not word: + return [word] + return re.split(self._ifs_re, word) + + def _update_ifs(self, value): + """Update the split_fields related variables when IFS character set is + changed. + """ + # TODO: handle NULL IFS + + # Separate characters in whitespace and non-whitespace + chars = set(value) + ws = [c for c in chars if c in _IFS_WHITESPACES] + nws = [c for c in chars if c not in _IFS_WHITESPACES] + + # Keep whitespaces in a string for left and right stripping + self._ifs_ws = ''.join(ws) + + # Build a regexp to split fields + trailing = '[' + ''.join([re.escape(c) for c in ws]) + ']' + if nws: + # First, the single non-whitespace occurence. 
+ nws = '[' + ''.join([re.escape(c) for c in nws]) + ']' + nws = '(?:' + trailing + '*' + nws + trailing + '*' + '|' + trailing + '+)' + else: + # Then mix all parts with quantifiers + nws = trailing + '+' + self._ifs_re = re.compile(nws) + + def has_opt(self, opt, val=None): + return (opt, val) in self._opt + + def set_opt(self, opt, val=None): + self._opt.add((opt, val)) + + def find_in_path(self, name, pwd=False): + path = self._env.get('PATH', '').split(os.pathsep) + if pwd: + path[:0] = [self['PWD']] + if os.name == 'nt': + return win32_find_in_path(name, self._env.get('PATH', '')) + else: + raise NotImplementedError() + + def define_function(self, name, body): + if not is_name(name): + raise ShellSyntaxError('%s is not a valid function name' % repr(name)) + self._functions[name] = body + + def remove_function(self, name): + del self._functions[name] + + def is_function(self, name): + return name in self._functions + + def get_function(self, name): + return self._functions.get(name) + + +name_charset = 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789_' +name_charset = dict(zip(name_charset,name_charset)) + +def match_name(s): + """Return the length in characters of the longest prefix made of name + allowed characters in s. + """ + for i,c in enumerate(s): + if c not in name_charset: + return s[:i] + return s + +def is_name(s): + return len([c for c in s if c not in name_charset])<=0 + +def is_special_param(c): + return len(c)==1 and c in ('@','*','#','?','-','$','!','0') + +def utility_not_implemented(name, *args, **kwargs): + raise NotImplementedError('%s utility is not implemented' % name) + + +class Utility: + """Define utilities properties: + func -- utility callable. See builtin module for utility samples. + is_special -- see XCU 2.8. 
+ """ + def __init__(self, func, is_special=0): + self.func = func + self.is_special = bool(is_special) + + +def encodeargs(args): + def encodearg(s): + lines = base64.encodestring(s) + lines = [l.splitlines()[0] for l in lines] + return ''.join(lines) + + s = pickle.dumps(args) + return encodearg(s) + +def decodeargs(s): + s = base64.decodestring(s) + return pickle.loads(s) + + +class GlobError(Exception): + pass + +class Options: + def __init__(self): + # True if Mercurial operates with binary streams + self.hgbinary = True + +class Interpreter: + # Implementation is very basic: the execute() method just makes a DFS on the + # AST and execute nodes one by one. Nodes are tuple (name,obj) where name + # is a string identifier and obj the AST element returned by the parser. + # + # Handler are named after the node identifiers. + # TODO: check node names and remove the switch in execute with some + # dynamic getattr() call to find node handlers. + """Shell interpreter. + + The following debugging flags can be passed: + debug-parsing - enable PLY debugging. + debug-tree - print the generated AST. + debug-cmd - trace command execution before word expansion, plus exit status. + debug-utility - trace utility execution. + """ + + # List supported commands. 
+ COMMANDS = { + 'cat': Utility(builtin.utility_cat,), + 'cd': Utility(builtin.utility_cd,), + ':': Utility(builtin.utility_colon,), + 'echo': Utility(builtin.utility_echo), + 'env': Utility(builtin.utility_env), + 'exit': Utility(builtin.utility_exit), + 'export': Utility(builtin.builtin_export, is_special=1), + 'egrep': Utility(builtin.utility_egrep), + 'fgrep': Utility(builtin.utility_fgrep), + 'gunzip': Utility(builtin.utility_gunzip), + 'kill': Utility(builtin.utility_kill), + 'mkdir': Utility(builtin.utility_mkdir), + 'netstat': Utility(builtin.utility_netstat), + 'printf': Utility(builtin.utility_printf), + 'pwd': Utility(builtin.utility_pwd), + 'return': Utility(builtin.builtin_return, is_special=1), + 'sed': Utility(builtin.utility_sed,), + 'set': Utility(builtin.builtin_set,), + 'shift': Utility(builtin.builtin_shift,), + 'sleep': Utility(builtin.utility_sleep,), + 'sort': Utility(builtin.utility_sort,), + 'trap': Utility(builtin.builtin_trap, is_special=1), + 'true': Utility(builtin.utility_true), + 'unset': Utility(builtin.builtin_unset, is_special=1), + 'wait': Utility(builtin.builtin_wait, is_special=1), + } + + def __init__(self, pwd, debugflags = [], env=None, redirs=None, stdin=None, + stdout=None, stderr=None, opts=Options()): + self._env = env + if self._env is None: + self._env = Environment(pwd) + self._children = {} + + self._redirs = redirs + self._close_redirs = False + + if self._redirs is None: + if stdin is None: + stdin = sys.stdin + if stdout is None: + stdout = sys.stdout + if stderr is None: + stderr = sys.stderr + stdin = FileWrapper('r', stdin, False) + stdout = FileWrapper('w', stdout, False) + stderr = FileWrapper('w', stderr, False) + self._redirs = Redirections(stdin, stdout, stderr) + self._close_redirs = True + + self._debugflags = list(debugflags) + self._logfile = sys.stderr + self._options = opts + + def close(self): + """Must be called when the interpreter is no longer used.""" + script = self._env.traps.get('EXIT') + if 
script: + try: + self.execute_script(script=script) + except: + pass + + if self._redirs is not None and self._close_redirs: + self._redirs.close() + self._redirs = None + + def log(self, s): + self._logfile.write(s) + self._logfile.flush() + + def __getitem__(self, key): + return self._env[key] + + def __setitem__(self, key, value): + self._env[key] = value + + def options(self): + return self._options + + def redirect(self, redirs, ios): + def add_redir(io): + if isinstance(io, pyshyacc.IORedirect): + redirs.add(self, io.op, io.filename, io.io_number) + else: + redirs.add_here_document(self, io.name, io.content, io.io_number) + + map(add_redir, ios) + return redirs + + def execute_script(self, script=None, ast=None, sourced=False, + scriptpath=None): + """If script is not None, parse the input. Otherwise takes the supplied + AST. Then execute the AST. + Return the script exit status. + """ + try: + if scriptpath is not None: + self._env['0'] = os.path.abspath(scriptpath) + + if script is not None: + debug_parsing = ('debug-parsing' in self._debugflags) + cmds, script = pyshyacc.parse(script, True, debug_parsing) + if 'debug-tree' in self._debugflags: + pyshyacc.print_commands(cmds, self._logfile) + self._logfile.flush() + else: + cmds, script = ast, '' + + status = 0 + for cmd in cmds: + try: + status = self.execute(cmd) + except ExitSignal as e: + if sourced: + raise + status = int(e.args[0]) + return status + except ShellError: + self._env['?'] = 1 + raise + if 'debug-utility' in self._debugflags or 'debug-cmd' in self._debugflags: + self.log('returncode ' + str(status)+ '\n') + return status + except CommandNotFound as e: + print >>self._redirs.stderr, str(e) + self._redirs.stderr.flush() + # Command not found by non-interactive shell + # return 127 + raise + except RedirectionError as e: + # TODO: should be handled depending on the utility status + print >>self._redirs.stderr, str(e) + self._redirs.stderr.flush() + # Command not found by non-interactive shell 
+ # return 127 + raise + + def dotcommand(self, env, args): + if len(args) < 1: + raise ShellError('. expects at least one argument') + path = args[0] + if '/' not in path: + found = env.find_in_path(args[0], True) + if found: + path = found[0] + script = file(path).read() + return self.execute_script(script=script, sourced=True) + + def execute(self, token, redirs=None): + """Execute and AST subtree with supplied redirections overriding default + interpreter ones. + Return the exit status. + """ + if not token: + return 0 + + if redirs is None: + redirs = self._redirs + + if isinstance(token, list): + # Commands sequence + res = 0 + for t in token: + res = self.execute(t, redirs) + return res + + type, value = token + status = 0 + if type=='simple_command': + redirs_copy = redirs.clone() + try: + # TODO: define and handle command return values + # TODO: implement set -e + status = self._execute_simple_command(value, redirs_copy) + finally: + redirs_copy.close() + elif type=='pipeline': + status = self._execute_pipeline(value, redirs) + elif type=='and_or': + status = self._execute_and_or(value, redirs) + elif type=='for_clause': + status = self._execute_for_clause(value, redirs) + elif type=='while_clause': + status = self._execute_while_clause(value, redirs) + elif type=='function_definition': + status = self._execute_function_definition(value, redirs) + elif type=='brace_group': + status = self._execute_brace_group(value, redirs) + elif type=='if_clause': + status = self._execute_if_clause(value, redirs) + elif type=='subshell': + status = self.subshell(ast=value.cmds, redirs=redirs) + elif type=='async': + status = self._asynclist(value) + elif type=='redirect_list': + redirs_copy = self.redirect(redirs.clone(), value.redirs) + try: + status = self.execute(value.cmd, redirs_copy) + finally: + redirs_copy.close() + else: + raise NotImplementedError('Unsupported token type ' + type) + + if status < 0: + status = 255 + return status + + def 
_execute_if_clause(self, if_clause, redirs):
        """Run the condition, then the if- or else-branch commands."""
        cond_status = self.execute(if_clause.cond, redirs)
        if cond_status==0:
            return self.execute(if_clause.if_cmds, redirs)
        else:
            return self.execute(if_clause.else_cmds, redirs)

    def _execute_brace_group(self, group, redirs):
        # Status of a brace group is the status of its last command.
        status = 0
        for cmd in group.cmds:
            status = self.execute(cmd, redirs)
        return status

    def _execute_function_definition(self, fundef, redirs):
        # Defining a function always succeeds with status 0.
        self._env.define_function(fundef.name, fundef.body)
        return 0

    def _execute_while_clause(self, while_clause, redirs):
        """Loop while the condition commands exit with status 0; return the
        status of the last body command run (0 if the body never ran)."""
        status = 0
        while 1:
            cond_status = 0
            for cond in while_clause.condition:
                cond_status = self.execute(cond, redirs)

            if cond_status:
                break

            for cmd in while_clause.cmds:
                status = self.execute(cmd, redirs)

        return status

    def _execute_for_clause(self, for_clause, redirs):
        """Expand the item words, then run the body once per item with the
        loop variable assigned."""
        if not is_name(for_clause.name):
            raise ShellSyntaxError('%s is not a valid name' % repr(for_clause.name))
        # Word-expand every item and flatten the results.
        items = mappend(self.expand_token, for_clause.items)

        status = 0
        for item in items:
            self._env[for_clause.name] = item
            for cmd in for_clause.cmds:
                status = self.execute(cmd, redirs)
        return status

    def _execute_and_or(self, or_and, redirs):
        # Short-circuit: run right side only when && saw success or || saw failure.
        res = self.execute(or_and.left, redirs)
        if (or_and.op=='&&' and res==0) or (or_and.op!='&&' and res!=0):
            res = self.execute(or_and.right, redirs)
        return res

    def _execute_pipeline(self, pipeline, redirs):
        """Run a pipeline; status is that of the last command (negated for '!').

        Commands are NOT run concurrently — see the comment below.
        """
        if len(pipeline.commands)==1:
            status = self.execute(pipeline.commands[0], redirs)
        else:
            # Execute all commands one after the other
            status = 0
            inpath, outpath = None, None
            try:
                # Commands inputs and outputs cannot really be plugged as done
                # by a real shell. Run commands sequentially and chain their
                # input/output through temporary files.
                tmpfd, inpath = tempfile.mkstemp()
                os.close(tmpfd)
                tmpfd, outpath = tempfile.mkstemp()
                os.close(tmpfd)

                inpath = win32_to_unix_path(inpath)
                outpath = win32_to_unix_path(outpath)

                for i, cmd in enumerate(pipeline.commands):
                    call_redirs = redirs.clone()
                    try:
                        # All but the first read the previous output; all but
                        # the last write the next input.
                        if i!=0:
                            call_redirs.add(self, '<', inpath)
                        if i!=len(pipeline.commands)-1:
                            call_redirs.add(self, '>', outpath)

                        status = self.execute(cmd, call_redirs)

                        # Chain inputs/outputs
                        inpath, outpath = outpath, inpath
                    finally:
                        call_redirs.close()
            finally:
                if inpath: os.remove(inpath)
                if outpath: os.remove(outpath)

        if pipeline.reverse_status:
            # '!' prefix inverts the pipeline status (0 <-> 1).
            status = int(not status)
        self._env['?'] = status
        return status

    def _execute_function(self, name, args, interp, env, stdin, stdout, stderr, *others):
        """Invoke shell function 'name' with 'args' as positional parameters.

        Signature matches the Utility.func calling convention; extra debug
        arguments are accepted and ignored via *others.
        """
        assert interp is self

        func = env.get_function(name)
        #Set positional parameters
        prevargs = None
        try:
            prevargs = env.set_positional_args(args)
            try:
                # Function body runs with duplicated standard streams.
                redirs = Redirections(stdin.dup(), stdout.dup(), stderr.dup())
                try:
                    status = self.execute(func, redirs)
                finally:
                    redirs.close()
            except ReturnSignal as e:
                # 'return' builtin unwinds to here with the return status.
                status = int(e.args[0])
            env['?'] = status
            return status
        finally:
            #Reset positional parameters
            if prevargs is not None:
                env.set_positional_args(prevargs)

    def _execute_simple_command(self, token, redirs):
        """Can raise ReturnSignal when return builtin is called, ExitSignal when
        exit is called, and other shell exceptions upon builtin failures.
+ """ + debug_command = 'debug-cmd' in self._debugflags + if debug_command: + self.log('word' + repr(token.words) + '\n') + self.log('assigns' + repr(token.assigns) + '\n') + self.log('redirs' + repr(token.redirs) + '\n') + + is_special = None + env = self._env + + try: + # Word expansion + args = [] + for word in token.words: + args += self.expand_token(word) + if is_special is None and args: + is_special = env.is_function(args[0]) or \ + (args[0] in self.COMMANDS and self.COMMANDS[args[0]].is_special) + + if debug_command: + self.log('_execute_simple_command' + str(args) + '\n') + + if not args: + # Redirections happen is a subshell + redirs = redirs.clone() + elif not is_special: + env = self._env.clone() + + # Redirections + self.redirect(redirs, token.redirs) + + # Variables assignments + res = 0 + for type,(k,v) in token.assigns: + status, expanded = self.expand_variable((k,v)) + if status is not None: + res = status + if args: + env.export(k, expanded) + else: + env[k] = expanded + + if args and args[0] in ('.', 'source'): + res = self.dotcommand(env, args[1:]) + elif args: + if args[0] in self.COMMANDS: + command = self.COMMANDS[args[0]] + elif env.is_function(args[0]): + command = Utility(self._execute_function, is_special=True) + else: + if not '/' in args[0].replace('\\', '/'): + cmd = env.find_in_path(args[0]) + if not cmd: + # TODO: test error code on unknown command => 127 + raise CommandNotFound('Unknown command: "%s"' % args[0]) + else: + # Handle commands like '/cygdrive/c/foo.bat' + cmd = cygwin_to_windows_path(args[0]) + if not os.path.exists(cmd): + raise CommandNotFound('%s: No such file or directory' % args[0]) + shebang = resolve_shebang(cmd) + if shebang: + cmd = shebang + else: + cmd = [cmd] + args[0:1] = cmd + command = Utility(builtin.run_command) + + # Command execution + if 'debug-cmd' in self._debugflags: + self.log('redirections ' + str(redirs) + '\n') + + res = command.func(args[0], args[1:], self, env, + redirs.stdin(), 
redirs.stdout(), + redirs.stderr(), self._debugflags) + + if self._env.has_opt('-x'): + # Trace command execution in shell environment + # BUG: would be hard to reproduce a real shell behaviour since + # the AST is not annotated with source lines/tokens. + self._redirs.stdout().write(' '.join(args)) + + except ReturnSignal: + raise + except ShellError as e: + if is_special or isinstance(e, (ExitSignal, + ShellSyntaxError, ExpansionError)): + raise e + self._redirs.stderr().write(str(e)+'\n') + return 1 + + return res + + def expand_token(self, word): + """Expand a word as specified in [2.6 Word Expansions]. Return the list + of expanded words. + """ + status, wtrees = self._expand_word(word) + return map(pyshlex.wordtree_as_string, wtrees) + + def expand_variable(self, word): + """Return a status code (or None if no command expansion occurred) + and a single word. + """ + status, wtrees = self._expand_word(word, pathname=False, split=False) + words = map(pyshlex.wordtree_as_string, wtrees) + assert len(words)==1 + return status, words[0] + + def expand_here_document(self, word): + """Return the expanded document as a single word. The here document is + assumed to be unquoted. + """ + status, wtrees = self._expand_word(word, pathname=False, + split=False, here_document=True) + words = map(pyshlex.wordtree_as_string, wtrees) + assert len(words)==1 + return words[0] + + def expand_redirection(self, word): + """Return a single word.""" + return self.expand_variable(word)[1] + + def get_env(self): + return self._env + + def _expand_word(self, token, pathname=True, split=True, here_document=False): + wtree = pyshlex.make_wordtree(token[1], here_document=here_document) + + # TODO: implement tilde expansion + def expand(wtree): + """Return a pseudo wordtree: the tree or its subelements can be empty + lists when no value result from the expansion. 
+ """ + status = None + for part in wtree: + if not isinstance(part, list): + continue + if part[0]in ("'", '\\'): + continue + elif part[0] in ('`', '$('): + status, result = self._expand_command(part) + part[:] = result + elif part[0] in ('$', '${'): + part[:] = self._expand_parameter(part, wtree[0]=='"', split) + elif part[0] in ('', '"'): + status, result = expand(part) + part[:] = result + else: + raise NotImplementedError('%s expansion is not implemented' + % part[0]) + # [] is returned when an expansion result in no-field, + # like an empty $@ + wtree = [p for p in wtree if p != []] + if len(wtree) < 3: + return status, [] + return status, wtree + + status, wtree = expand(wtree) + if len(wtree) == 0: + return status, wtree + wtree = pyshlex.normalize_wordtree(wtree) + + if split: + wtrees = self._split_fields(wtree) + else: + wtrees = [wtree] + + if pathname: + wtrees = mappend(self._expand_pathname, wtrees) + + wtrees = map(self._remove_quotes, wtrees) + return status, wtrees + + def _expand_command(self, wtree): + # BUG: there is something to do with backslashes and quoted + # characters here + command = pyshlex.wordtree_as_string(wtree[1:-1]) + status, output = self.subshell_output(command) + return status, ['', output, ''] + + def _expand_parameter(self, wtree, quoted=False, split=False): + """Return a valid wtree or an empty list when no parameter results.""" + # Get the parameter name + # TODO: implement weird expansion rules with ':' + name = pyshlex.wordtree_as_string(wtree[1:-1]) + if not is_name(name) and not is_special_param(name): + raise ExpansionError('Bad substitution "%s"' % name) + # TODO: implement special parameters + if name in ('@', '*'): + args = self._env.get_positional_args() + if len(args) == 0: + return [] + if len(args)<2: + return ['', ''.join(args), ''] + + sep = self._env.get('IFS', '')[:1] + if split and quoted and name=='@': + # Introduce a new token to tell the caller that these parameters + # cause a split as specified in 
2.5.2 + return ['@'] + args + [''] + else: + return ['', sep.join(args), ''] + + return ['', self._env.get(name, ''), ''] + + def _split_fields(self, wtree): + def is_empty(split): + return split==['', '', ''] + + def split_positional(quoted): + # Return a list of wtree split according positional parameters rules. + # All remaining '@' groups are removed. + assert quoted[0]=='"' + + splits = [[]] + for part in quoted: + if not isinstance(part, list) or part[0]!='@': + splits[-1].append(part) + else: + # Empty or single argument list were dealt with already + assert len(part)>3 + # First argument must join with the beginning part of the original word + splits[-1].append(part[1]) + # Create double-quotes expressions for every argument after the first + for arg in part[2:-1]: + splits[-1].append('"') + splits.append(['"', arg]) + return splits + + # At this point, all expansions but pathnames have occured. Only quoted + # and positional sequences remain. Thus, all candidates for field splitting + # are in the tree root, or are positional splits ('@') and lie in root + # children. 
+ if not wtree or wtree[0] not in ('', '"'): + # The whole token is quoted or empty, nothing to split + return [wtree] + + if wtree[0]=='"': + wtree = ['', wtree, ''] + + result = [['', '']] + for part in wtree[1:-1]: + if isinstance(part, list): + if part[0]=='"': + splits = split_positional(part) + if len(splits)<=1: + result[-1] += [part, ''] + else: + # Terminate the current split + result[-1] += [splits[0], ''] + result += splits[1:-1] + # Create a new split + result += [['', splits[-1], '']] + else: + result[-1] += [part, ''] + else: + splits = self._env.split_fields(part) + if len(splits)<=1: + # No split + result[-1][-1] += part + else: + # Terminate the current resulting part and create a new one + result[-1][-1] += splits[0] + result[-1].append('') + result += [['', r, ''] for r in splits[1:-1]] + result += [['', splits[-1]]] + result[-1].append('') + + # Leading and trailing empty groups come from leading/trailing blanks + if result and is_empty(result[-1]): + result[-1:] = [] + if result and is_empty(result[0]): + result[:1] = [] + return result + + def _expand_pathname(self, wtree): + """See [2.6.6 Pathname Expansion].""" + if self._env.has_opt('-f'): + return [wtree] + + # All expansions have been performed, only quoted sequences should remain + # in the tree. 
Generate the pattern by folding the tree, escaping special + # characters when appear quoted + special_chars = '*?[]' + + def make_pattern(wtree): + subpattern = [] + for part in wtree[1:-1]: + if isinstance(part, list): + part = make_pattern(part) + elif wtree[0]!='': + for c in part: + # Meta-characters cannot be quoted + if c in special_chars: + raise GlobError() + subpattern.append(part) + return ''.join(subpattern) + + def pwd_glob(pattern): + cwd = os.getcwd() + os.chdir(self._env['PWD']) + try: + return glob.glob(pattern) + finally: + os.chdir(cwd) + + #TODO: check working directory issues here wrt relative patterns + try: + pattern = make_pattern(wtree) + paths = pwd_glob(pattern) + except GlobError: + # BUG: Meta-characters were found in quoted sequences. The should + # have been used literally but this is unsupported in current glob module. + # Instead we consider the whole tree must be used literally and + # therefore there is no point in globbing. This is wrong when meta + # characters are mixed with quoted meta in the same pattern like: + # < foo*"py*" > + paths = [] + + if not paths: + return [wtree] + return [['', path, ''] for path in paths] + + def _remove_quotes(self, wtree): + """See [2.6.7 Quote Removal].""" + + def unquote(wtree): + unquoted = [] + for part in wtree[1:-1]: + if isinstance(part, list): + part = unquote(part) + unquoted.append(part) + return ''.join(unquoted) + + return ['', unquote(wtree), ''] + + def subshell(self, script=None, ast=None, redirs=None): + """Execute the script or AST in a subshell, with inherited redirections + if redirs is not None. 
+ """ + if redirs: + sub_redirs = redirs + else: + sub_redirs = redirs.clone() + + subshell = None + try: + subshell = Interpreter(None, self._debugflags, self._env.clone(True), + sub_redirs, opts=self._options) + return subshell.execute_script(script, ast) + finally: + if not redirs: sub_redirs.close() + if subshell: subshell.close() + + def subshell_output(self, script): + """Execute the script in a subshell and return the captured output.""" + # Create temporary file to capture subshell output + tmpfd, tmppath = tempfile.mkstemp() + try: + tmpfile = os.fdopen(tmpfd, 'wb') + stdout = FileWrapper('w', tmpfile) + + redirs = Redirections(self._redirs.stdin().dup(), + stdout, + self._redirs.stderr().dup()) + try: + status = self.subshell(script=script, redirs=redirs) + finally: + redirs.close() + redirs = None + + # Extract subshell standard output + tmpfile = open(tmppath, 'rb') + try: + output = tmpfile.read() + return status, output.rstrip('\n') + finally: + tmpfile.close() + finally: + os.remove(tmppath) + + def _asynclist(self, cmd): + args = (self._env.get_variables(), cmd) + arg = encodeargs(args) + assert len(args) < 30*1024 + cmd = ['pysh.bat', '--ast', '-c', arg] + p = subprocess.Popen(cmd, cwd=self._env['PWD']) + self._children[p.pid] = p + self._env['!'] = p.pid + return 0 + + def wait(self, pids=None): + if not pids: + pids = self._children.keys() + + status = 127 + for pid in pids: + if pid not in self._children: + continue + p = self._children.pop(pid) + status = p.wait() + + return status + diff --git a/bitbake/lib/bb/pysh/lsprof.py b/bitbake/lib/bb/pysh/lsprof.py new file mode 100644 index 0000000000..b1831c22a7 --- /dev/null +++ b/bitbake/lib/bb/pysh/lsprof.py @@ -0,0 +1,116 @@ +#! 
/usr/bin/env python + +import sys +from _lsprof import Profiler, profiler_entry + +__all__ = ['profile', 'Stats'] + +def profile(f, *args, **kwds): + """XXX docstring""" + p = Profiler() + p.enable(subcalls=True, builtins=True) + try: + f(*args, **kwds) + finally: + p.disable() + return Stats(p.getstats()) + + +class Stats(object): + """XXX docstring""" + + def __init__(self, data): + self.data = data + + def sort(self, crit="inlinetime"): + """XXX docstring""" + if crit not in profiler_entry.__dict__: + raise ValueError("Can't sort by %s" % crit) + self.data.sort(lambda b, a: cmp(getattr(a, crit), + getattr(b, crit))) + for e in self.data: + if e.calls: + e.calls.sort(lambda b, a: cmp(getattr(a, crit), + getattr(b, crit))) + + def pprint(self, top=None, file=None, limit=None, climit=None): + """XXX docstring""" + if file is None: + file = sys.stdout + d = self.data + if top is not None: + d = d[:top] + cols = "% 12s %12s %11.4f %11.4f %s\n" + hcols = "% 12s %12s %12s %12s %s\n" + cols2 = "+%12s %12s %11.4f %11.4f + %s\n" + file.write(hcols % ("CallCount", "Recursive", "Total(ms)", + "Inline(ms)", "module:lineno(function)")) + count = 0 + for e in d: + file.write(cols % (e.callcount, e.reccallcount, e.totaltime, + e.inlinetime, label(e.code))) + count += 1 + if limit is not None and count == limit: + return + ccount = 0 + if e.calls: + for se in e.calls: + file.write(cols % ("+%s" % se.callcount, se.reccallcount, + se.totaltime, se.inlinetime, + "+%s" % label(se.code))) + count += 1 + ccount += 1 + if limit is not None and count == limit: + return + if climit is not None and ccount == climit: + break + + def freeze(self): + """Replace all references to code objects with string + descriptions; this makes it possible to pickle the instance.""" + + # this code is probably rather ickier than it needs to be! 
+ for i in range(len(self.data)): + e = self.data[i] + if not isinstance(e.code, str): + self.data[i] = type(e)((label(e.code),) + e[1:]) + if e.calls: + for j in range(len(e.calls)): + se = e.calls[j] + if not isinstance(se.code, str): + e.calls[j] = type(se)((label(se.code),) + se[1:]) + +_fn2mod = {} + +def label(code): + if isinstance(code, str): + return code + try: + mname = _fn2mod[code.co_filename] + except KeyError: + for k, v in sys.modules.items(): + if v is None: + continue + if not hasattr(v, '__file__'): + continue + if not isinstance(v.__file__, str): + continue + if v.__file__.startswith(code.co_filename): + mname = _fn2mod[code.co_filename] = k + break + else: + mname = _fn2mod[code.co_filename] = '<%s>'%code.co_filename + + return '%s:%d(%s)' % (mname, code.co_firstlineno, code.co_name) + + +if __name__ == '__main__': + import os + sys.argv = sys.argv[1:] + if not sys.argv: + print >> sys.stderr, "usage: lsprof.py