summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
authorChris Larson <chris_larson@mentor.com>2010-03-24 16:56:12 -0700
committerRichard Purdie <rpurdie@linux.intel.com>2010-07-02 15:41:32 +0100
commit7acc132cac873e60005516272473a55a8160b9c4 (patch)
tree2e4122862ffd856803160b6089fcb979d3efd630
parentbbf83fd988ca3cf9dae7d2b542a11a7c942b1702 (diff)
downloadpoky-7acc132cac873e60005516272473a55a8160b9c4.tar.gz
Formatting cleanups
(Bitbake rev: 2caf134b43a44dad30af4fbe33033b3c58deee57) Signed-off-by: Chris Larson <chris_larson@mentor.com> Signed-off-by: Richard Purdie <rpurdie@linux.intel.com>
-rwxr-xr-xbitbake/bin/bitbake98
-rw-r--r--bitbake/lib/bb/COW.py20
-rw-r--r--bitbake/lib/bb/build.py11
-rw-r--r--bitbake/lib/bb/cache.py28
-rw-r--r--bitbake/lib/bb/command.py7
-rw-r--r--bitbake/lib/bb/cooker.py25
-rw-r--r--bitbake/lib/bb/daemonize.py295
-rw-r--r--bitbake/lib/bb/data.py35
-rw-r--r--bitbake/lib/bb/data_smart.py46
-rw-r--r--bitbake/lib/bb/event.py5
-rw-r--r--bitbake/lib/bb/fetch/__init__.py32
-rw-r--r--bitbake/lib/bb/fetch/bzr.py7
-rw-r--r--bitbake/lib/bb/fetch/cvs.py2
-rw-r--r--bitbake/lib/bb/fetch/git.py11
-rw-r--r--bitbake/lib/bb/fetch/hg.py7
-rw-r--r--bitbake/lib/bb/fetch/local.py6
-rw-r--r--bitbake/lib/bb/fetch/osc.py10
-rw-r--r--bitbake/lib/bb/fetch/perforce.py8
-rw-r--r--bitbake/lib/bb/fetch/svn.py2
-rw-r--r--bitbake/lib/bb/msg.py2
-rw-r--r--bitbake/lib/bb/parse/ast.py2
-rw-r--r--bitbake/lib/bb/parse/parse_py/BBHandler.py2
-rw-r--r--bitbake/lib/bb/parse/parse_py/ConfHandler.py2
-rw-r--r--bitbake/lib/bb/persist_data.py13
-rw-r--r--bitbake/lib/bb/providers.py16
-rw-r--r--bitbake/lib/bb/runqueue.py93
-rw-r--r--bitbake/lib/bb/server/none.py1
-rw-r--r--bitbake/lib/bb/server/xmlrpc.py9
-rw-r--r--bitbake/lib/bb/shell.py8
-rw-r--r--bitbake/lib/bb/taskdata.py24
-rw-r--r--bitbake/lib/bb/ui/__init__.py1
-rw-r--r--bitbake/lib/bb/ui/crumbs/__init__.py1
-rw-r--r--bitbake/lib/bb/ui/crumbs/buildmanager.py39
-rw-r--r--bitbake/lib/bb/ui/crumbs/runningbuild.py24
-rw-r--r--bitbake/lib/bb/ui/depexp.py3
-rw-r--r--bitbake/lib/bb/ui/goggle.py13
-rw-r--r--bitbake/lib/bb/ui/knotty.py2
-rw-r--r--bitbake/lib/bb/ui/ncurses.py7
-rw-r--r--bitbake/lib/bb/ui/puccho.py110
-rw-r--r--bitbake/lib/bb/ui/uievent.py5
-rw-r--r--bitbake/lib/bb/utils.py185
41 files changed, 595 insertions, 622 deletions
diff --git a/bitbake/bin/bitbake b/bitbake/bin/bitbake
index b577e53895..ba84d2de7f 100755
--- a/bitbake/bin/bitbake
+++ b/bitbake/bin/bitbake
@@ -23,7 +23,8 @@
23# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 23# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
24 24
25import sys, os, getopt, re, time, optparse, xmlrpclib 25import sys, os, getopt, re, time, optparse, xmlrpclib
26sys.path.insert(0,os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])), 'lib')) 26sys.path.insert(0, os.path.join(os.path.dirname(os.path.dirname(sys.argv[0])),
27 'lib'))
27 28
28import warnings 29import warnings
29import bb 30import bb
@@ -40,16 +41,18 @@ if sys.hexversion < 0x020600F0:
40 print "Sorry, python 2.6 or later is required for this version of bitbake" 41 print "Sorry, python 2.6 or later is required for this version of bitbake"
41 sys.exit(1) 42 sys.exit(1)
42 43
44
43#============================================================================# 45#============================================================================#
44# BBOptions 46# BBOptions
45#============================================================================# 47#============================================================================#
46class BBConfiguration( object ): 48class BBConfiguration(object):
47 """ 49 """
48 Manages build options and configurations for one run 50 Manages build options and configurations for one run
49 """ 51 """
50 def __init__( self, options ): 52
53 def __init__(self, options):
51 for key, val in options.__dict__.items(): 54 for key, val in options.__dict__.items():
52 setattr( self, key, val ) 55 setattr(self, key, val)
53 self.pkgs_to_build = [] 56 self.pkgs_to_build = []
54 57
55 58
@@ -90,73 +93,74 @@ def main():
90 print "Sorry, bitbake needs python 2.5 or later." 93 print "Sorry, bitbake needs python 2.5 or later."
91 sys.exit(1) 94 sys.exit(1)
92 95
93 parser = optparse.OptionParser( version = "BitBake Build Tool Core version %s, %%prog version %s" % ( bb.__version__, __version__ ), 96 parser = optparse.OptionParser(
94 usage = """%prog [options] [package ...] 97 version = "BitBake Build Tool Core version %s, %%prog version %s" % (bb.__version__, __version__),
98 usage = """%prog [options] [package ...]
95 99
96Executes the specified task (default is 'build') for a given set of BitBake files. 100Executes the specified task (default is 'build') for a given set of BitBake files.
97It expects that BBFILES is defined, which is a space separated list of files to 101It expects that BBFILES is defined, which is a space separated list of files to
98be executed. BBFILES does support wildcards. 102be executed. BBFILES does support wildcards.
99Default BBFILES are the .bb files in the current directory.""" ) 103Default BBFILES are the .bb files in the current directory.""")
100 104
101 parser.add_option( "-b", "--buildfile", help = "execute the task against this .bb file, rather than a package from BBFILES.", 105 parser.add_option("-b", "--buildfile", help = "execute the task against this .bb file, rather than a package from BBFILES.",
102 action = "store", dest = "buildfile", default = None ) 106 action = "store", dest = "buildfile", default = None)
103 107
104 parser.add_option( "-k", "--continue", help = "continue as much as possible after an error. While the target that failed, and those that depend on it, cannot be remade, the other dependencies of these targets can be processed all the same.", 108 parser.add_option("-k", "--continue", help = "continue as much as possible after an error. While the target that failed, and those that depend on it, cannot be remade, the other dependencies of these targets can be processed all the same.",
105 action = "store_false", dest = "abort", default = True ) 109 action = "store_false", dest = "abort", default = True)
106 110
107 parser.add_option( "-a", "--tryaltconfigs", help = "continue with builds by trying to use alternative providers where possible.", 111 parser.add_option("-a", "--tryaltconfigs", help = "continue with builds by trying to use alternative providers where possible.",
108 action = "store_true", dest = "tryaltconfigs", default = False ) 112 action = "store_true", dest = "tryaltconfigs", default = False)
109 113
110 parser.add_option( "-f", "--force", help = "force run of specified cmd, regardless of stamp status", 114 parser.add_option("-f", "--force", help = "force run of specified cmd, regardless of stamp status",
111 action = "store_true", dest = "force", default = False ) 115 action = "store_true", dest = "force", default = False)
112 116
113 parser.add_option( "-i", "--interactive", help = "drop into the interactive mode also called the BitBake shell.", 117 parser.add_option("-i", "--interactive", help = "drop into the interactive mode also called the BitBake shell.",
114 action = "store_true", dest = "interactive", default = False ) 118 action = "store_true", dest = "interactive", default = False)
115 119
116 parser.add_option( "-c", "--cmd", help = "Specify task to execute. Note that this only executes the specified task for the providee and the packages it depends on, i.e. 'compile' does not implicitly call stage for the dependencies (IOW: use only if you know what you are doing). Depending on the base.bbclass a listtasks tasks is defined and will show available tasks", 120 parser.add_option("-c", "--cmd", help = "Specify task to execute. Note that this only executes the specified task for the providee and the packages it depends on, i.e. 'compile' does not implicitly call stage for the dependencies (IOW: use only if you know what you are doing). Depending on the base.bbclass a listtasks tasks is defined and will show available tasks",
117 action = "store", dest = "cmd" ) 121 action = "store", dest = "cmd")
118 122
119 parser.add_option( "-r", "--read", help = "read the specified file before bitbake.conf", 123 parser.add_option("-r", "--read", help = "read the specified file before bitbake.conf",
120 action = "append", dest = "file", default = [] ) 124 action = "append", dest = "file", default = [])
121 125
122 parser.add_option( "-v", "--verbose", help = "output more chit-chat to the terminal", 126 parser.add_option("-v", "--verbose", help = "output more chit-chat to the terminal",
123 action = "store_true", dest = "verbose", default = False ) 127 action = "store_true", dest = "verbose", default = False)
124 128
125 parser.add_option( "-D", "--debug", help = "Increase the debug level. You can specify this more than once.", 129 parser.add_option("-D", "--debug", help = "Increase the debug level. You can specify this more than once.",
126 action = "count", dest="debug", default = 0) 130 action = "count", dest="debug", default = 0)
127 131
128 parser.add_option( "-n", "--dry-run", help = "don't execute, just go through the motions", 132 parser.add_option("-n", "--dry-run", help = "don't execute, just go through the motions",
129 action = "store_true", dest = "dry_run", default = False ) 133 action = "store_true", dest = "dry_run", default = False)
130 134
131 parser.add_option( "-p", "--parse-only", help = "quit after parsing the BB files (developers only)", 135 parser.add_option("-p", "--parse-only", help = "quit after parsing the BB files (developers only)",
132 action = "store_true", dest = "parse_only", default = False ) 136 action = "store_true", dest = "parse_only", default = False)
133 137
134 parser.add_option( "-d", "--disable-psyco", help = "disable using the psyco just-in-time compiler (not recommended)", 138 parser.add_option("-d", "--disable-psyco", help = "disable using the psyco just-in-time compiler (not recommended)",
135 action = "store_true", dest = "disable_psyco", default = False ) 139 action = "store_true", dest = "disable_psyco", default = False)
136 140
137 parser.add_option( "-s", "--show-versions", help = "show current and preferred versions of all packages", 141 parser.add_option("-s", "--show-versions", help = "show current and preferred versions of all packages",
138 action = "store_true", dest = "show_versions", default = False ) 142 action = "store_true", dest = "show_versions", default = False)
139 143
140 parser.add_option( "-e", "--environment", help = "show the global or per-package environment (this is what used to be bbread)", 144 parser.add_option("-e", "--environment", help = "show the global or per-package environment (this is what used to be bbread)",
141 action = "store_true", dest = "show_environment", default = False ) 145 action = "store_true", dest = "show_environment", default = False)
142 146
143 parser.add_option( "-g", "--graphviz", help = "emit the dependency trees of the specified packages in the dot syntax", 147 parser.add_option("-g", "--graphviz", help = "emit the dependency trees of the specified packages in the dot syntax",
144 action = "store_true", dest = "dot_graph", default = False ) 148 action = "store_true", dest = "dot_graph", default = False)
145 149
146 parser.add_option( "-I", "--ignore-deps", help = """Assume these dependencies don't exist and are already provided (equivalent to ASSUME_PROVIDED). Useful to make dependency graphs more appealing""", 150 parser.add_option("-I", "--ignore-deps", help = """Assume these dependencies don't exist and are already provided (equivalent to ASSUME_PROVIDED). Useful to make dependency graphs more appealing""",
147 action = "append", dest = "extra_assume_provided", default = [] ) 151 action = "append", dest = "extra_assume_provided", default = [])
148 152
149 parser.add_option( "-l", "--log-domains", help = """Show debug logging for the specified logging domains""", 153 parser.add_option("-l", "--log-domains", help = """Show debug logging for the specified logging domains""",
150 action = "append", dest = "debug_domains", default = [] ) 154 action = "append", dest = "debug_domains", default = [])
151 155
152 parser.add_option( "-P", "--profile", help = "profile the command and print a report", 156 parser.add_option("-P", "--profile", help = "profile the command and print a report",
153 action = "store_true", dest = "profile", default = False ) 157 action = "store_true", dest = "profile", default = False)
154 158
155 parser.add_option( "-u", "--ui", help = "userinterface to use", 159 parser.add_option("-u", "--ui", help = "userinterface to use",
156 action = "store", dest = "ui") 160 action = "store", dest = "ui")
157 161
158 parser.add_option( "", "--revisions-changed", help = "Set the exit code depending on whether upstream floating revisions have changed or not", 162 parser.add_option("", "--revisions-changed", help = "Set the exit code depending on whether upstream floating revisions have changed or not",
159 action = "store_true", dest = "revisions_changed", default = False ) 163 action = "store_true", dest = "revisions_changed", default = False)
160 164
161 options, args = parser.parse_args(sys.argv) 165 options, args = parser.parse_args(sys.argv)
162 166
@@ -168,7 +172,7 @@ Default BBFILES are the .bb files in the current directory.""" )
168 172
169 # Save a logfile for cooker into the current working directory. When the 173 # Save a logfile for cooker into the current working directory. When the
170 # server is daemonized this logfile will be truncated. 174 # server is daemonized this logfile will be truncated.
171 cooker_logfile = os.path.join (os.getcwd(), "cooker.log") 175 cooker_logfile = os.path.join(os.getcwd(), "cooker.log")
172 176
173 bb.utils.init_logger(bb.msg, configuration.verbose, configuration.debug, 177 bb.utils.init_logger(bb.msg, configuration.verbose, configuration.debug,
174 configuration.debug_domains) 178 configuration.debug_domains)
@@ -200,7 +204,7 @@ Default BBFILES are the .bb files in the current directory.""" )
200 # Dynamically load the UI based on the ui name. Although we 204 # Dynamically load the UI based on the ui name. Although we
201 # suggest a fixed set this allows you to have flexibility in which 205 # suggest a fixed set this allows you to have flexibility in which
202 # ones are available. 206 # ones are available.
203 uimodule = __import__("bb.ui", fromlist=[ui]) 207 uimodule = __import__("bb.ui", fromlist = [ui])
204 return_value = getattr(uimodule, ui).init(serverConnection.connection, serverConnection.events) 208 return_value = getattr(uimodule, ui).init(serverConnection.connection, serverConnection.events)
205 except AttributeError: 209 except AttributeError:
206 print "FATAL: Invalid user interface '%s' specified. " % ui 210 print "FATAL: Invalid user interface '%s' specified. " % ui
diff --git a/bitbake/lib/bb/COW.py b/bitbake/lib/bb/COW.py
index ca206cf4b4..224213db5c 100644
--- a/bitbake/lib/bb/COW.py
+++ b/bitbake/lib/bb/COW.py
@@ -3,7 +3,7 @@
3# 3#
4# This is a copy on write dictionary and set which abuses classes to try and be nice and fast. 4# This is a copy on write dictionary and set which abuses classes to try and be nice and fast.
5# 5#
6# Copyright (C) 2006 Tim Amsell 6# Copyright (C) 2006 Tim Amsell
7# 7#
8# This program is free software; you can redistribute it and/or modify 8# This program is free software; you can redistribute it and/or modify
9# it under the terms of the GNU General Public License version 2 as 9# it under the terms of the GNU General Public License version 2 as
@@ -18,7 +18,7 @@
18# with this program; if not, write to the Free Software Foundation, Inc., 18# with this program; if not, write to the Free Software Foundation, Inc.,
19# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 19# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
20# 20#
21#Please Note: 21#Please Note:
22# Be careful when using mutable types (ie Dict and Lists) - operations involving these are SLOW. 22# Be careful when using mutable types (ie Dict and Lists) - operations involving these are SLOW.
23# Assign a file to __warn__ to get warnings about slow operations. 23# Assign a file to __warn__ to get warnings about slow operations.
24# 24#
@@ -40,7 +40,7 @@ MUTABLE = "__mutable__"
40 40
41class COWMeta(type): 41class COWMeta(type):
42 pass 42 pass
43 43
44class COWDictMeta(COWMeta): 44class COWDictMeta(COWMeta):
45 __warn__ = False 45 __warn__ = False
46 __hasmutable__ = False 46 __hasmutable__ = False
@@ -64,7 +64,7 @@ class COWDictMeta(COWMeta):
64 cls.__hasmutable__ = True 64 cls.__hasmutable__ = True
65 key += MUTABLE 65 key += MUTABLE
66 setattr(cls, key, value) 66 setattr(cls, key, value)
67 67
68 def __getmutable__(cls, key, readonly=False): 68 def __getmutable__(cls, key, readonly=False):
69 nkey = key + MUTABLE 69 nkey = key + MUTABLE
70 try: 70 try:
@@ -98,8 +98,8 @@ class COWDictMeta(COWMeta):
98 value = getattr(cls, key) 98 value = getattr(cls, key)
99 except AttributeError: 99 except AttributeError:
100 value = cls.__getmutable__(key, readonly) 100 value = cls.__getmutable__(key, readonly)
101 101
102 # This is for values which have been deleted 102 # This is for values which have been deleted
103 if value is cls.__marker__: 103 if value is cls.__marker__:
104 raise AttributeError("key %s does not exist." % key) 104 raise AttributeError("key %s does not exist." % key)
105 105
@@ -127,7 +127,7 @@ class COWDictMeta(COWMeta):
127 def iter(cls, type, readonly=False): 127 def iter(cls, type, readonly=False):
128 for key in dir(cls): 128 for key in dir(cls):
129 if key.startswith("__"): 129 if key.startswith("__"):
130 continue 130 continue
131 131
132 if key.endswith(MUTABLE): 132 if key.endswith(MUTABLE):
133 key = key[:-len(MUTABLE)] 133 key = key[:-len(MUTABLE)]
@@ -176,13 +176,13 @@ class COWSetMeta(COWDictMeta):
176 176
177 def remove(cls, value): 177 def remove(cls, value):
178 COWDictMeta.__delitem__(cls, repr(hash(value))) 178 COWDictMeta.__delitem__(cls, repr(hash(value)))
179 179
180 def __in__(cls, value): 180 def __in__(cls, value):
181 return COWDictMeta.has_key(repr(hash(value))) 181 return COWDictMeta.has_key(repr(hash(value)))
182 182
183 def iterkeys(cls): 183 def iterkeys(cls):
184 raise TypeError("sets don't have keys") 184 raise TypeError("sets don't have keys")
185 185
186 def iteritems(cls): 186 def iteritems(cls):
187 raise TypeError("sets don't have 'items'") 187 raise TypeError("sets don't have 'items'")
188 188
@@ -286,7 +286,7 @@ if __name__ == "__main__":
286 print "Boo!" 286 print "Boo!"
287 else: 287 else:
288 print "Yay - has_key with delete works!" 288 print "Yay - has_key with delete works!"
289 289
290 print "a", a 290 print "a", a
291 for x in a.iteritems(): 291 for x in a.iteritems():
292 print x 292 print x
diff --git a/bitbake/lib/bb/build.py b/bitbake/lib/bb/build.py
index 16d69281f1..7ca1663b7c 100644
--- a/bitbake/lib/bb/build.py
+++ b/bitbake/lib/bb/build.py
@@ -28,7 +28,7 @@
28from bb import data, event, mkdirhier, utils 28from bb import data, event, mkdirhier, utils
29import bb, os, sys 29import bb, os, sys
30 30
31# When we execute a python function we'd like certain things 31# When we execute a python function we'd like certain things
32# in all namespaces, hence we add them to __builtins__ 32# in all namespaces, hence we add them to __builtins__
33# If we do not do this and use the exec globals, they will 33# If we do not do this and use the exec globals, they will
34# not be available to subfunctions. 34# not be available to subfunctions.
@@ -212,7 +212,7 @@ def exec_func_python(func, d, runfile, logfile):
212 try: 212 try:
213 utils.better_exec(comp, {"d": d}, tmp, bbfile) 213 utils.better_exec(comp, {"d": d}, tmp, bbfile)
214 except: 214 except:
215 (t,value,tb) = sys.exc_info() 215 (t, value, tb) = sys.exc_info()
216 216
217 if t in [bb.parse.SkipPackage, bb.build.FuncFailed]: 217 if t in [bb.parse.SkipPackage, bb.build.FuncFailed]:
218 raise 218 raise
@@ -303,8 +303,8 @@ def exec_task(task, d):
303 303
304def extract_stamp(d, fn): 304def extract_stamp(d, fn):
305 """ 305 """
306 Extracts stamp format which is either a data dictonary (fn unset) 306 Extracts stamp format which is either a data dictonary (fn unset)
307 or a dataCache entry (fn set). 307 or a dataCache entry (fn set).
308 """ 308 """
309 if fn: 309 if fn:
310 return d.stamp[fn] 310 return d.stamp[fn]
@@ -361,7 +361,7 @@ def add_tasks(tasklist, d):
361 if not task in task_deps['tasks']: 361 if not task in task_deps['tasks']:
362 task_deps['tasks'].append(task) 362 task_deps['tasks'].append(task)
363 363
364 flags = data.getVarFlags(task, d) 364 flags = data.getVarFlags(task, d)
365 def getTask(name): 365 def getTask(name):
366 if not name in task_deps: 366 if not name in task_deps:
367 task_deps[name] = {} 367 task_deps[name] = {}
@@ -387,4 +387,3 @@ def remove_task(task, kill, d):
387 If kill is 1, also remove tasks that depend on this task.""" 387 If kill is 1, also remove tasks that depend on this task."""
388 388
389 data.delVarFlag(task, 'task', d) 389 data.delVarFlag(task, 'task', d)
390
diff --git a/bitbake/lib/bb/cache.py b/bitbake/lib/bb/cache.py
index 106621911b..300acc5fc6 100644
--- a/bitbake/lib/bb/cache.py
+++ b/bitbake/lib/bb/cache.py
@@ -73,7 +73,7 @@ class Cache:
73 # cache there isn't even any point in loading it... 73 # cache there isn't even any point in loading it...
74 newest_mtime = 0 74 newest_mtime = 0
75 deps = bb.data.getVar("__depends", data, True) 75 deps = bb.data.getVar("__depends", data, True)
76 for f,old_mtime in deps: 76 for f, old_mtime in deps:
77 if old_mtime > newest_mtime: 77 if old_mtime > newest_mtime:
78 newest_mtime = old_mtime 78 newest_mtime = old_mtime
79 79
@@ -102,10 +102,10 @@ class Cache:
102 """ 102 """
103 Gets the value of a variable 103 Gets the value of a variable
104 (similar to getVar in the data class) 104 (similar to getVar in the data class)
105 105
106 There are two scenarios: 106 There are two scenarios:
107 1. We have cached data - serve from depends_cache[fn] 107 1. We have cached data - serve from depends_cache[fn]
108 2. We're learning what data to cache - serve from data 108 2. We're learning what data to cache - serve from data
109 backend but add a copy of the data to the cache. 109 backend but add a copy of the data to the cache.
110 """ 110 """
111 if fn in self.clean: 111 if fn in self.clean:
@@ -134,7 +134,7 @@ class Cache:
134 self.data = data 134 self.data = data
135 135
136 # Make sure __depends makes the depends_cache 136 # Make sure __depends makes the depends_cache
137 # If we're a virtual class we need to make sure all our depends are appended 137 # If we're a virtual class we need to make sure all our depends are appended
138 # to the depends of fn. 138 # to the depends of fn.
139 depends = self.getVar("__depends", virtualfn, True) or [] 139 depends = self.getVar("__depends", virtualfn, True) or []
140 self.depends_cache.setdefault(fn, {}) 140 self.depends_cache.setdefault(fn, {})
@@ -259,7 +259,7 @@ class Cache:
259 self.remove(fn) 259 self.remove(fn)
260 return False 260 return False
261 261
262 mtime = bb.parse.cached_mtime_noerror(fn) 262 mtime = bb.parse.cached_mtime_noerror(fn)
263 263
264 # Check file still exists 264 # Check file still exists
265 if mtime == 0: 265 if mtime == 0:
@@ -276,7 +276,7 @@ class Cache:
276 # Check dependencies are still valid 276 # Check dependencies are still valid
277 depends = self.getVar("__depends", fn, True) 277 depends = self.getVar("__depends", fn, True)
278 if depends: 278 if depends:
279 for f,old_mtime in depends: 279 for f, old_mtime in depends:
280 fmtime = bb.parse.cached_mtime_noerror(f) 280 fmtime = bb.parse.cached_mtime_noerror(f)
281 # Check if file still exists 281 # Check if file still exists
282 if old_mtime != 0 and fmtime == 0: 282 if old_mtime != 0 and fmtime == 0:
@@ -346,7 +346,7 @@ class Cache:
346 346
347 def handle_data(self, file_name, cacheData): 347 def handle_data(self, file_name, cacheData):
348 """ 348 """
349 Save data we need into the cache 349 Save data we need into the cache
350 """ 350 """
351 351
352 pn = self.getVar('PN', file_name, True) 352 pn = self.getVar('PN', file_name, True)
@@ -372,7 +372,7 @@ class Cache:
372 372
373 # build FileName to PackageName lookup table 373 # build FileName to PackageName lookup table
374 cacheData.pkg_fn[file_name] = pn 374 cacheData.pkg_fn[file_name] = pn
375 cacheData.pkg_pepvpr[file_name] = (pe,pv,pr) 375 cacheData.pkg_pepvpr[file_name] = (pe, pv, pr)
376 cacheData.pkg_dp[file_name] = dp 376 cacheData.pkg_dp[file_name] = dp
377 377
378 provides = [pn] 378 provides = [pn]
@@ -401,13 +401,13 @@ class Cache:
401 if not dep in cacheData.all_depends: 401 if not dep in cacheData.all_depends:
402 cacheData.all_depends.append(dep) 402 cacheData.all_depends.append(dep)
403 403
404 # Build reverse hash for PACKAGES, so runtime dependencies 404 # Build reverse hash for PACKAGES, so runtime dependencies
405 # can be be resolved (RDEPENDS, RRECOMMENDS etc.) 405 # can be be resolved (RDEPENDS, RRECOMMENDS etc.)
406 for package in packages: 406 for package in packages:
407 if not package in cacheData.packages: 407 if not package in cacheData.packages:
408 cacheData.packages[package] = [] 408 cacheData.packages[package] = []
409 cacheData.packages[package].append(file_name) 409 cacheData.packages[package].append(file_name)
410 rprovides += (self.getVar("RPROVIDES_%s" % package, file_name, 1) or "").split() 410 rprovides += (self.getVar("RPROVIDES_%s" % package, file_name, 1) or "").split()
411 411
412 for package in packages_dynamic: 412 for package in packages_dynamic:
413 if not package in cacheData.packages_dynamic: 413 if not package in cacheData.packages_dynamic:
@@ -472,12 +472,12 @@ class Cache:
472 472
473def init(cooker): 473def init(cooker):
474 """ 474 """
475 The Objective: Cache the minimum amount of data possible yet get to the 475 The Objective: Cache the minimum amount of data possible yet get to the
476 stage of building packages (i.e. tryBuild) without reparsing any .bb files. 476 stage of building packages (i.e. tryBuild) without reparsing any .bb files.
477 477
478 To do this, we intercept getVar calls and only cache the variables we see 478 To do this, we intercept getVar calls and only cache the variables we see
479 being accessed. We rely on the cache getVar calls being made for all 479 being accessed. We rely on the cache getVar calls being made for all
480 variables bitbake might need to use to reach this stage. For each cached 480 variables bitbake might need to use to reach this stage. For each cached
481 file we need to track: 481 file we need to track:
482 482
483 * Its mtime 483 * Its mtime
diff --git a/bitbake/lib/bb/command.py b/bitbake/lib/bb/command.py
index 06bd203c90..a590e61abe 100644
--- a/bitbake/lib/bb/command.py
+++ b/bitbake/lib/bb/command.py
@@ -20,7 +20,7 @@ Provide an interface to interact with the bitbake server through 'commands'
20# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 20# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
21 21
22""" 22"""
23The bitbake server takes 'commands' from its UI/commandline. 23The bitbake server takes 'commands' from its UI/commandline.
24Commands are either synchronous or asynchronous. 24Commands are either synchronous or asynchronous.
25Async commands return data to the client in the form of events. 25Async commands return data to the client in the form of events.
26Sync commands must only return data through the function return value 26Sync commands must only return data through the function return value
@@ -62,7 +62,7 @@ class Command:
62 try: 62 try:
63 command = commandline.pop(0) 63 command = commandline.pop(0)
64 if command in CommandsSync.__dict__: 64 if command in CommandsSync.__dict__:
65 # Can run synchronous commands straight away 65 # Can run synchronous commands straight away
66 return getattr(CommandsSync, command)(self.cmds_sync, self, commandline) 66 return getattr(CommandsSync, command)(self.cmds_sync, self, commandline)
67 if self.currentAsyncCommand is not None: 67 if self.currentAsyncCommand is not None:
68 return "Busy (%s in progress)" % self.currentAsyncCommand[0] 68 return "Busy (%s in progress)" % self.currentAsyncCommand[0]
@@ -268,6 +268,3 @@ class CookerCommandSetExitCode(bb.event.Event):
268 def __init__(self, exitcode): 268 def __init__(self, exitcode):
269 bb.event.Event.__init__(self) 269 bb.event.Event.__init__(self)
270 self.exitcode = int(exitcode) 270 self.exitcode = int(exitcode)
271
272
273
diff --git a/bitbake/lib/bb/cooker.py b/bitbake/lib/bb/cooker.py
index a413c8a854..743e4be06b 100644
--- a/bitbake/lib/bb/cooker.py
+++ b/bitbake/lib/bb/cooker.py
@@ -181,7 +181,7 @@ class BBCooker:
181 181
182 def tryBuild(self, fn, task): 182 def tryBuild(self, fn, task):
183 """ 183 """
184 Build a provider and its dependencies. 184 Build a provider and its dependencies.
185 build_depends is a list of previous build dependencies (not runtime) 185 build_depends is a list of previous build dependencies (not runtime)
186 If build_depends is empty, we're dealing with a runtime depends 186 If build_depends is empty, we're dealing with a runtime depends
187 """ 187 """
@@ -206,7 +206,7 @@ class BBCooker:
206 206
207 # Sort by priority 207 # Sort by priority
208 for pn in pkg_pn: 208 for pn in pkg_pn:
209 (last_ver,last_file,pref_ver,pref_file) = bb.providers.findBestProvider(pn, self.configuration.data, self.status) 209 (last_ver, last_file, pref_ver, pref_file) = bb.providers.findBestProvider(pn, self.configuration.data, self.status)
210 preferred_versions[pn] = (pref_ver, pref_file) 210 preferred_versions[pn] = (pref_ver, pref_file)
211 latest_versions[pn] = (last_ver, last_file) 211 latest_versions[pn] = (last_ver, last_file)
212 212
@@ -315,7 +315,7 @@ class BBCooker:
315 rq = bb.runqueue.RunQueue(self, self.configuration.data, self.status, taskdata, runlist) 315 rq = bb.runqueue.RunQueue(self, self.configuration.data, self.status, taskdata, runlist)
316 rq.prepare_runqueue() 316 rq.prepare_runqueue()
317 317
318 seen_fnids = [] 318 seen_fnids = []
319 depend_tree = {} 319 depend_tree = {}
320 depend_tree["depends"] = {} 320 depend_tree["depends"] = {}
321 depend_tree["tdepends"] = {} 321 depend_tree["tdepends"] = {}
@@ -352,7 +352,7 @@ class BBCooker:
352 352
353 depend_tree["rdepends-pn"][pn] = [] 353 depend_tree["rdepends-pn"][pn] = []
354 for rdep in taskdata.rdepids[fnid]: 354 for rdep in taskdata.rdepids[fnid]:
355 depend_tree["rdepends-pn"][pn].append(taskdata.run_names_index[rdep]) 355 depend_tree["rdepends-pn"][pn].append(taskdata.run_names_index[rdep])
356 356
357 rdepends = self.status.rundeps[fn] 357 rdepends = self.status.rundeps[fn]
358 for package in rdepends: 358 for package in rdepends:
@@ -542,7 +542,7 @@ class BBCooker:
542 # Nomally we only register event handlers at the end of parsing .bb files 542 # Nomally we only register event handlers at the end of parsing .bb files
543 # We register any handlers we've found so far here... 543 # We register any handlers we've found so far here...
544 for var in data.getVar('__BBHANDLERS', self.configuration.data) or []: 544 for var in data.getVar('__BBHANDLERS', self.configuration.data) or []:
545 bb.event.register(var,bb.data.getVar(var, self.configuration.data)) 545 bb.event.register(var, bb.data.getVar(var, self.configuration.data))
546 546
547 bb.fetch.fetcher_init(self.configuration.data) 547 bb.fetch.fetcher_init(self.configuration.data)
548 548
@@ -583,7 +583,7 @@ class BBCooker:
583 """ 583 """
584 if not bb.data.getVar("BUILDNAME", self.configuration.data): 584 if not bb.data.getVar("BUILDNAME", self.configuration.data):
585 bb.data.setVar("BUILDNAME", os.popen('date +%Y%m%d%H%M').readline().strip(), self.configuration.data) 585 bb.data.setVar("BUILDNAME", os.popen('date +%Y%m%d%H%M').readline().strip(), self.configuration.data)
586 bb.data.setVar("BUILDSTART", time.strftime('%m/%d/%Y %H:%M:%S',time.gmtime()), self.configuration.data) 586 bb.data.setVar("BUILDSTART", time.strftime('%m/%d/%Y %H:%M:%S', time.gmtime()), self.configuration.data)
587 587
588 def matchFiles(self, buildfile): 588 def matchFiles(self, buildfile):
589 """ 589 """
@@ -775,10 +775,10 @@ class BBCooker:
775 775
776 ignore = bb.data.getVar("ASSUME_PROVIDED", self.configuration.data, 1) or "" 776 ignore = bb.data.getVar("ASSUME_PROVIDED", self.configuration.data, 1) or ""
777 self.status.ignored_dependencies = set(ignore.split()) 777 self.status.ignored_dependencies = set(ignore.split())
778 778
779 for dep in self.configuration.extra_assume_provided: 779 for dep in self.configuration.extra_assume_provided:
780 self.status.ignored_dependencies.add(dep) 780 self.status.ignored_dependencies.add(dep)
781 781
782 self.handleCollections( bb.data.getVar("BBFILE_COLLECTIONS", self.configuration.data, 1) ) 782 self.handleCollections( bb.data.getVar("BBFILE_COLLECTIONS", self.configuration.data, 1) )
783 783
784 bb.msg.debug(1, bb.msg.domain.Collection, "collecting .bb files") 784 bb.msg.debug(1, bb.msg.domain.Collection, "collecting .bb files")
@@ -816,7 +816,7 @@ class BBCooker:
816 for f in contents: 816 for f in contents:
817 (root, ext) = os.path.splitext(f) 817 (root, ext) = os.path.splitext(f)
818 if ext == ".bb": 818 if ext == ".bb":
819 bbfiles.append(os.path.abspath(os.path.join(os.getcwd(),f))) 819 bbfiles.append(os.path.abspath(os.path.join(os.getcwd(), f)))
820 return bbfiles 820 return bbfiles
821 821
822 def find_bbfiles( self, path ): 822 def find_bbfiles( self, path ):
@@ -828,7 +828,7 @@ class BBCooker:
828 for ignored in ('SCCS', 'CVS', '.svn'): 828 for ignored in ('SCCS', 'CVS', '.svn'):
829 if ignored in dirs: 829 if ignored in dirs:
830 dirs.remove(ignored) 830 dirs.remove(ignored)
831 found += [join(dir,f) for f in files if f.endswith('.bb')] 831 found += [join(dir, f) for f in files if f.endswith('.bb')]
832 832
833 return found 833 return found
834 834
@@ -912,9 +912,9 @@ class BBCooker:
912 pout.close() 912 pout.close()
913 else: 913 else:
914 self.server.serve_forever() 914 self.server.serve_forever()
915 915
916 bb.event.fire(CookerExit(), self.configuration.event_data) 916 bb.event.fire(CookerExit(), self.configuration.event_data)
917 917
918class CookerExit(bb.event.Event): 918class CookerExit(bb.event.Event):
919 """ 919 """
920 Notify clients of the Cooker shutdown 920 Notify clients of the Cooker shutdown
@@ -984,4 +984,3 @@ class CookerParser:
984 raise ParsingErrorsFound 984 raise ParsingErrorsFound
985 return False 985 return False
986 return True 986 return True
987
diff --git a/bitbake/lib/bb/daemonize.py b/bitbake/lib/bb/daemonize.py
index 1a8bb379f4..a944af2238 100644
--- a/bitbake/lib/bb/daemonize.py
+++ b/bitbake/lib/bb/daemonize.py
@@ -3,17 +3,17 @@ Python Deamonizing helper
3 3
4Configurable daemon behaviors: 4Configurable daemon behaviors:
5 5
6 1.) The current working directory set to the "/" directory. 6 1.) The current working directory set to the "/" directory.
7 2.) The current file creation mode mask set to 0. 7 2.) The current file creation mode mask set to 0.
8 3.) Close all open files (1024). 8 3.) Close all open files (1024).
9 4.) Redirect standard I/O streams to "/dev/null". 9 4.) Redirect standard I/O streams to "/dev/null".
10 10
11A failed call to fork() now raises an exception. 11A failed call to fork() now raises an exception.
12 12
13References: 13References:
14 1) Advanced Programming in the Unix Environment: W. Richard Stevens 14 1) Advanced Programming in the Unix Environment: W. Richard Stevens
15 2) Unix Programming Frequently Asked Questions: 15 2) Unix Programming Frequently Asked Questions:
16 http://www.erlenstar.demon.co.uk/unix/faq_toc.html 16 http://www.erlenstar.demon.co.uk/unix/faq_toc.html
17 17
18Modified to allow a function to be daemonized and return for 18Modified to allow a function to be daemonized and return for
19bitbake use by Richard Purdie 19bitbake use by Richard Purdie
@@ -24,8 +24,8 @@ __copyright__ = "Copyright (C) 2005 Chad J. Schroeder"
24__version__ = "0.2" 24__version__ = "0.2"
25 25
26# Standard Python modules. 26# Standard Python modules.
27import os # Miscellaneous OS interfaces. 27import os # Miscellaneous OS interfaces.
28import sys # System-specific parameters and functions. 28import sys # System-specific parameters and functions.
29 29
30# Default daemon parameters. 30# Default daemon parameters.
31# File mode creation mask of the daemon. 31# File mode creation mask of the daemon.
@@ -37,155 +37,154 @@ MAXFD = 1024
37 37
38# The standard I/O file descriptors are redirected to /dev/null by default. 38# The standard I/O file descriptors are redirected to /dev/null by default.
39if (hasattr(os, "devnull")): 39if (hasattr(os, "devnull")):
40 REDIRECT_TO = os.devnull 40 REDIRECT_TO = os.devnull
41else: 41else:
42 REDIRECT_TO = "/dev/null" 42 REDIRECT_TO = "/dev/null"
43 43
44def createDaemon(function, logfile): 44def createDaemon(function, logfile):
45 """ 45 """
46 Detach a process from the controlling terminal and run it in the 46 Detach a process from the controlling terminal and run it in the
47 background as a daemon, returning control to the caller. 47 background as a daemon, returning control to the caller.
48 """ 48 """
49 49
50 try: 50 try:
51 # Fork a child process so the parent can exit. This returns control to 51 # Fork a child process so the parent can exit. This returns control to
52 # the command-line or shell. It also guarantees that the child will not 52 # the command-line or shell. It also guarantees that the child will not
53 # be a process group leader, since the child receives a new process ID 53 # be a process group leader, since the child receives a new process ID
54 # and inherits the parent's process group ID. This step is required 54 # and inherits the parent's process group ID. This step is required
55 # to insure that the next call to os.setsid is successful. 55 # to insure that the next call to os.setsid is successful.
56 pid = os.fork() 56 pid = os.fork()
57 except OSError, e: 57 except OSError, e:
58 raise Exception, "%s [%d]" % (e.strerror, e.errno) 58 raise Exception, "%s [%d]" % (e.strerror, e.errno)
59 59
60 if (pid == 0): # The first child. 60 if (pid == 0): # The first child.
61 # To become the session leader of this new session and the process group 61 # To become the session leader of this new session and the process group
62 # leader of the new process group, we call os.setsid(). The process is 62 # leader of the new process group, we call os.setsid(). The process is
63 # also guaranteed not to have a controlling terminal. 63 # also guaranteed not to have a controlling terminal.
64 os.setsid() 64 os.setsid()
65 65
66 # Is ignoring SIGHUP necessary? 66 # Is ignoring SIGHUP necessary?
67 # 67 #
68 # It's often suggested that the SIGHUP signal should be ignored before 68 # It's often suggested that the SIGHUP signal should be ignored before
69 # the second fork to avoid premature termination of the process. The 69 # the second fork to avoid premature termination of the process. The
70 # reason is that when the first child terminates, all processes, e.g. 70 # reason is that when the first child terminates, all processes, e.g.
71 # the second child, in the orphaned group will be sent a SIGHUP. 71 # the second child, in the orphaned group will be sent a SIGHUP.
72 # 72 #
73 # "However, as part of the session management system, there are exactly 73 # "However, as part of the session management system, there are exactly
74 # two cases where SIGHUP is sent on the death of a process: 74 # two cases where SIGHUP is sent on the death of a process:
75 # 75 #
76 # 1) When the process that dies is the session leader of a session that 76 # 1) When the process that dies is the session leader of a session that
77 # is attached to a terminal device, SIGHUP is sent to all processes 77 # is attached to a terminal device, SIGHUP is sent to all processes
78 # in the foreground process group of that terminal device. 78 # in the foreground process group of that terminal device.
79 # 2) When the death of a process causes a process group to become 79 # 2) When the death of a process causes a process group to become
80 # orphaned, and one or more processes in the orphaned group are 80 # orphaned, and one or more processes in the orphaned group are
81 # stopped, then SIGHUP and SIGCONT are sent to all members of the 81 # stopped, then SIGHUP and SIGCONT are sent to all members of the
82 # orphaned group." [2] 82 # orphaned group." [2]
83 # 83 #
84 # The first case can be ignored since the child is guaranteed not to have 84 # The first case can be ignored since the child is guaranteed not to have
85 # a controlling terminal. The second case isn't so easy to dismiss. 85 # a controlling terminal. The second case isn't so easy to dismiss.
86 # The process group is orphaned when the first child terminates and 86 # The process group is orphaned when the first child terminates and
87 # POSIX.1 requires that every STOPPED process in an orphaned process 87 # POSIX.1 requires that every STOPPED process in an orphaned process
88 # group be sent a SIGHUP signal followed by a SIGCONT signal. Since the 88 # group be sent a SIGHUP signal followed by a SIGCONT signal. Since the
89 # second child is not STOPPED though, we can safely forego ignoring the 89 # second child is not STOPPED though, we can safely forego ignoring the
90 # SIGHUP signal. In any case, there are no ill-effects if it is ignored. 90 # SIGHUP signal. In any case, there are no ill-effects if it is ignored.
91 # 91 #
92 # import signal # Set handlers for asynchronous events. 92 # import signal # Set handlers for asynchronous events.
93 # signal.signal(signal.SIGHUP, signal.SIG_IGN) 93 # signal.signal(signal.SIGHUP, signal.SIG_IGN)
94 94
95 try: 95 try:
96 # Fork a second child and exit immediately to prevent zombies. This 96 # Fork a second child and exit immediately to prevent zombies. This
97 # causes the second child process to be orphaned, making the init 97 # causes the second child process to be orphaned, making the init
98 # process responsible for its cleanup. And, since the first child is 98 # process responsible for its cleanup. And, since the first child is
99 # a session leader without a controlling terminal, it's possible for 99 # a session leader without a controlling terminal, it's possible for
100 # it to acquire one by opening a terminal in the future (System V- 100 # it to acquire one by opening a terminal in the future (System V-
101 # based systems). This second fork guarantees that the child is no 101 # based systems). This second fork guarantees that the child is no
102 # longer a session leader, preventing the daemon from ever acquiring 102 # longer a session leader, preventing the daemon from ever acquiring
103 # a controlling terminal. 103 # a controlling terminal.
104 pid = os.fork() # Fork a second child. 104 pid = os.fork() # Fork a second child.
105 except OSError, e: 105 except OSError, e:
106 raise Exception, "%s [%d]" % (e.strerror, e.errno) 106 raise Exception, "%s [%d]" % (e.strerror, e.errno)
107 107
108 if (pid == 0): # The second child. 108 if (pid == 0): # The second child.
109 # We probably don't want the file mode creation mask inherited from 109 # We probably don't want the file mode creation mask inherited from
110 # the parent, so we give the child complete control over permissions. 110 # the parent, so we give the child complete control over permissions.
111 if UMASK is not None: 111 if UMASK is not None:
112 os.umask(UMASK) 112 os.umask(UMASK)
113 else: 113 else:
114 # Parent (the first child) of the second child. 114 # Parent (the first child) of the second child.
115 os._exit(0) 115 os._exit(0)
116 else: 116 else:
117 # exit() or _exit()? 117 # exit() or _exit()?
118 # _exit is like exit(), but it doesn't call any functions registered 118 # _exit is like exit(), but it doesn't call any functions registered
119 # with atexit (and on_exit) or any registered signal handlers. It also 119 # with atexit (and on_exit) or any registered signal handlers. It also
120 # closes any open file descriptors. Using exit() may cause all stdio 120 # closes any open file descriptors. Using exit() may cause all stdio
121 # streams to be flushed twice and any temporary files may be unexpectedly 121 # streams to be flushed twice and any temporary files may be unexpectedly
122 # removed. It's therefore recommended that child branches of a fork() 122 # removed. It's therefore recommended that child branches of a fork()
123 # and the parent branch(es) of a daemon use _exit(). 123 # and the parent branch(es) of a daemon use _exit().
124 return 124 return
125 125
126 # Close all open file descriptors. This prevents the child from keeping 126 # Close all open file descriptors. This prevents the child from keeping
127 # open any file descriptors inherited from the parent. There is a variety 127 # open any file descriptors inherited from the parent. There is a variety
128 # of methods to accomplish this task. Three are listed below. 128 # of methods to accomplish this task. Three are listed below.
129 # 129 #
130 # Try the system configuration variable, SC_OPEN_MAX, to obtain the maximum 130 # Try the system configuration variable, SC_OPEN_MAX, to obtain the maximum
131 # number of open file descriptors to close. If it doesn't exists, use 131 # number of open file descriptors to close. If it doesn't exists, use
132 # the default value (configurable). 132 # the default value (configurable).
133 # 133 #
134 # try: 134 # try:
135 # maxfd = os.sysconf("SC_OPEN_MAX") 135 # maxfd = os.sysconf("SC_OPEN_MAX")
136 # except (AttributeError, ValueError): 136 # except (AttributeError, ValueError):
137 # maxfd = MAXFD 137 # maxfd = MAXFD
138 # 138 #
139 # OR 139 # OR
140 # 140 #
141 # if (os.sysconf_names.has_key("SC_OPEN_MAX")): 141 # if (os.sysconf_names.has_key("SC_OPEN_MAX")):
142 # maxfd = os.sysconf("SC_OPEN_MAX") 142 # maxfd = os.sysconf("SC_OPEN_MAX")
143 # else: 143 # else:
144 # maxfd = MAXFD 144 # maxfd = MAXFD
145 # 145 #
146 # OR 146 # OR
147 # 147 #
148 # Use the getrlimit method to retrieve the maximum file descriptor number 148 # Use the getrlimit method to retrieve the maximum file descriptor number
149 # that can be opened by this process. If there is not limit on the 149 # that can be opened by this process. If there is not limit on the
150 # resource, use the default value. 150 # resource, use the default value.
151 # 151 #
152 import resource # Resource usage information. 152 import resource # Resource usage information.
153 maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1] 153 maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1]
154 if (maxfd == resource.RLIM_INFINITY): 154 if (maxfd == resource.RLIM_INFINITY):
155 maxfd = MAXFD 155 maxfd = MAXFD
156 156
157 # Iterate through and close all file descriptors. 157 # Iterate through and close all file descriptors.
158# for fd in range(0, maxfd): 158# for fd in range(0, maxfd):
159# try: 159# try:
160# os.close(fd) 160# os.close(fd)
161# except OSError: # ERROR, fd wasn't open to begin with (ignored) 161# except OSError: # ERROR, fd wasn't open to begin with (ignored)
162# pass 162# pass
163 163
164 # Redirect the standard I/O file descriptors to the specified file. Since 164 # Redirect the standard I/O file descriptors to the specified file. Since
165 # the daemon has no controlling terminal, most daemons redirect stdin, 165 # the daemon has no controlling terminal, most daemons redirect stdin,
166 # stdout, and stderr to /dev/null. This is done to prevent side-effects 166 # stdout, and stderr to /dev/null. This is done to prevent side-effects
167 # from reads and writes to the standard I/O file descriptors. 167 # from reads and writes to the standard I/O file descriptors.
168 168
169 # This call to open is guaranteed to return the lowest file descriptor, 169 # This call to open is guaranteed to return the lowest file descriptor,
170 # which will be 0 (stdin), since it was closed above. 170 # which will be 0 (stdin), since it was closed above.
171# os.open(REDIRECT_TO, os.O_RDWR) # standard input (0) 171# os.open(REDIRECT_TO, os.O_RDWR) # standard input (0)
172 172
173 # Duplicate standard input to standard output and standard error. 173 # Duplicate standard input to standard output and standard error.
174# os.dup2(0, 1) # standard output (1) 174# os.dup2(0, 1) # standard output (1)
175# os.dup2(0, 2) # standard error (2) 175# os.dup2(0, 2) # standard error (2)
176 176
177 177
178 si = file('/dev/null', 'r') 178 si = file('/dev/null', 'r')
179 so = file(logfile, 'w') 179 so = file(logfile, 'w')
180 se = so 180 se = so
181 181
182 182
183 # Replace those fds with our own 183 # Replace those fds with our own
184 os.dup2(si.fileno(), sys.stdin.fileno()) 184 os.dup2(si.fileno(), sys.stdin.fileno())
185 os.dup2(so.fileno(), sys.stdout.fileno()) 185 os.dup2(so.fileno(), sys.stdout.fileno())
186 os.dup2(se.fileno(), sys.stderr.fileno()) 186 os.dup2(se.fileno(), sys.stderr.fileno())
187 187
188 function() 188 function()
189
190 os._exit(0)
191 189
190 os._exit(0)
diff --git a/bitbake/lib/bb/data.py b/bitbake/lib/bb/data.py
index cc5594e41e..e71ad6b9c0 100644
--- a/bitbake/lib/bb/data.py
+++ b/bitbake/lib/bb/data.py
@@ -11,7 +11,7 @@ operations. At night the cookie monster came by and
11suggested 'give me cookies on setting the variables and 11suggested 'give me cookies on setting the variables and
12things will work out'. Taking this suggestion into account 12things will work out'. Taking this suggestion into account
13applying the skills from the not yet passed 'Entwurf und 13applying the skills from the not yet passed 'Entwurf und
14Analyse von Algorithmen' lecture and the cookie 14Analyse von Algorithmen' lecture and the cookie
15monster seems to be right. We will track setVar more carefully 15monster seems to be right. We will track setVar more carefully
16to have faster update_data and expandKeys operations. 16to have faster update_data and expandKeys operations.
17 17
@@ -42,7 +42,7 @@ if sys.argv[0][-5:] == "pydoc":
42 path = os.path.dirname(os.path.dirname(sys.argv[1])) 42 path = os.path.dirname(os.path.dirname(sys.argv[1]))
43else: 43else:
44 path = os.path.dirname(os.path.dirname(sys.argv[0])) 44 path = os.path.dirname(os.path.dirname(sys.argv[0]))
45sys.path.insert(0,path) 45sys.path.insert(0, path)
46 46
47from bb import data_smart 47from bb import data_smart
48import bb 48import bb
@@ -62,14 +62,14 @@ def init_db(parent = None):
62 return _dict_type() 62 return _dict_type()
63 63
64def createCopy(source): 64def createCopy(source):
65 """Link the source set to the destination 65 """Link the source set to the destination
66 If one does not find the value in the destination set, 66 If one does not find the value in the destination set,
67 search will go on to the source set to get the value. 67 search will go on to the source set to get the value.
68 Value from source are copy-on-write. i.e. any try to 68 Value from source are copy-on-write. i.e. any try to
69 modify one of them will end up putting the modified value 69 modify one of them will end up putting the modified value
70 in the destination set. 70 in the destination set.
71 """ 71 """
72 return source.createCopy() 72 return source.createCopy()
73 73
74def initVar(var, d): 74def initVar(var, d):
75 """Non-destructive var init for data structure""" 75 """Non-destructive var init for data structure"""
@@ -78,12 +78,12 @@ def initVar(var, d):
78 78
79def setVar(var, value, d): 79def setVar(var, value, d):
80 """Set a variable to a given value""" 80 """Set a variable to a given value"""
81 d.setVar(var,value) 81 d.setVar(var, value)
82 82
83 83
84def getVar(var, d, exp = 0): 84def getVar(var, d, exp = 0):
85 """Gets the value of a variable""" 85 """Gets the value of a variable"""
86 return d.getVar(var,exp) 86 return d.getVar(var, exp)
87 87
88 88
89def renameVar(key, newkey, d): 89def renameVar(key, newkey, d):
@@ -96,15 +96,15 @@ def delVar(var, d):
96 96
97def setVarFlag(var, flag, flagvalue, d): 97def setVarFlag(var, flag, flagvalue, d):
98 """Set a flag for a given variable to a given value""" 98 """Set a flag for a given variable to a given value"""
99 d.setVarFlag(var,flag,flagvalue) 99 d.setVarFlag(var, flag, flagvalue)
100 100
101def getVarFlag(var, flag, d): 101def getVarFlag(var, flag, d):
102 """Gets given flag from given var""" 102 """Gets given flag from given var"""
103 return d.getVarFlag(var,flag) 103 return d.getVarFlag(var, flag)
104 104
105def delVarFlag(var, flag, d): 105def delVarFlag(var, flag, d):
106 """Removes a given flag from the variable's flags""" 106 """Removes a given flag from the variable's flags"""
107 d.delVarFlag(var,flag) 107 d.delVarFlag(var, flag)
108 108
109def setVarFlags(var, flags, d): 109def setVarFlags(var, flags, d):
110 """Set the flags for a given variable 110 """Set the flags for a given variable
@@ -114,7 +114,7 @@ def setVarFlags(var, flags, d):
114 flags. Think of this method as 114 flags. Think of this method as
115 addVarFlags 115 addVarFlags
116 """ 116 """
117 d.setVarFlags(var,flags) 117 d.setVarFlags(var, flags)
118 118
119def getVarFlags(var, d): 119def getVarFlags(var, d):
120 """Gets a variable's flags""" 120 """Gets a variable's flags"""
@@ -178,7 +178,7 @@ def expandKeys(alterdata, readdata = None):
178 continue 178 continue
179 todolist[key] = ekey 179 todolist[key] = ekey
180 180
181 # These two for loops are split for performance to maximise the 181 # These two for loops are split for performance to maximise the
182 # usefulness of the expand cache 182 # usefulness of the expand cache
183 183
184 for key in todolist: 184 for key in todolist:
@@ -267,7 +267,6 @@ def emit_var(var, o=sys.__stdout__, d = init(), all=False):
267 o.write('%s="%s"\n' % (varExpanded, alter)) 267 o.write('%s="%s"\n' % (varExpanded, alter))
268 return 1 268 return 1
269 269
270
271def emit_env(o=sys.__stdout__, d = init(), all=False): 270def emit_env(o=sys.__stdout__, d = init(), all=False):
272 """Emits all items in the data store in a format such that it can be sourced by a shell.""" 271 """Emits all items in the data store in a format such that it can be sourced by a shell."""
273 272
diff --git a/bitbake/lib/bb/data_smart.py b/bitbake/lib/bb/data_smart.py
index 6ea0182852..5ff0b37bfc 100644
--- a/bitbake/lib/bb/data_smart.py
+++ b/bitbake/lib/bb/data_smart.py
@@ -31,11 +31,11 @@ BitBake build tools.
31import copy, os, re, sys, time, types 31import copy, os, re, sys, time, types
32import bb 32import bb
33from bb import utils, methodpool 33from bb import utils, methodpool
34from COW import COWDictBase 34from bb.COW import COWDictBase
35from new import classobj 35from new import classobj
36 36
37 37
38__setvar_keyword__ = ["_append","_prepend"] 38__setvar_keyword__ = ["_append", "_prepend"]
39__setvar_regexp__ = re.compile('(?P<base>.*?)(?P<keyword>_append|_prepend)(_(?P<add>.*))?') 39__setvar_regexp__ = re.compile('(?P<base>.*?)(?P<keyword>_append|_prepend)(_(?P<add>.*))?')
40__expand_var_regexp__ = re.compile(r"\${[^{}]+}") 40__expand_var_regexp__ = re.compile(r"\${[^{}]+}")
41__expand_python_regexp__ = re.compile(r"\${@.+?}") 41__expand_python_regexp__ = re.compile(r"\${@.+?}")
@@ -51,7 +51,7 @@ class DataSmart:
51 51
52 self.expand_cache = {} 52 self.expand_cache = {}
53 53
54 def expand(self,s, varname): 54 def expand(self, s, varname):
55 def var_sub(match): 55 def var_sub(match):
56 key = match.group()[2:-1] 56 key = match.group()[2:-1]
57 if varname and key: 57 if varname and key:
@@ -165,7 +165,7 @@ class DataSmart:
165 if not var in self.dict: 165 if not var in self.dict:
166 self.dict[var] = {} 166 self.dict[var] = {}
167 167
168 def _findVar(self,var): 168 def _findVar(self, var):
169 _dest = self.dict 169 _dest = self.dict
170 170
171 while (_dest and var not in _dest): 171 while (_dest and var not in _dest):
@@ -189,7 +189,7 @@ class DataSmart:
189 else: 189 else:
190 self.initVar(var) 190 self.initVar(var)
191 191
192 def setVar(self,var,value): 192 def setVar(self, var, value):
193 self.expand_cache = {} 193 self.expand_cache = {}
194 match = __setvar_regexp__.match(var) 194 match = __setvar_regexp__.match(var)
195 if match and match.group("keyword") in __setvar_keyword__: 195 if match and match.group("keyword") in __setvar_keyword__:
@@ -223,16 +223,16 @@ class DataSmart:
223 # setting var 223 # setting var
224 self.dict[var]["content"] = value 224 self.dict[var]["content"] = value
225 225
226 def getVar(self,var,exp): 226 def getVar(self, var, exp):
227 value = self.getVarFlag(var,"content") 227 value = self.getVarFlag(var, "content")
228 228
229 if exp and value: 229 if exp and value:
230 return self.expand(value,var) 230 return self.expand(value, var)
231 return value 231 return value
232 232
233 def renameVar(self, key, newkey): 233 def renameVar(self, key, newkey):
234 """ 234 """
235 Rename the variable key to newkey 235 Rename the variable key to newkey
236 """ 236 """
237 val = self.getVar(key, 0) 237 val = self.getVar(key, 0)
238 if val is not None: 238 if val is not None:
@@ -246,30 +246,30 @@ class DataSmart:
246 dest = self.getVarFlag(newkey, i) or [] 246 dest = self.getVarFlag(newkey, i) or []
247 dest.extend(src) 247 dest.extend(src)
248 self.setVarFlag(newkey, i, dest) 248 self.setVarFlag(newkey, i, dest)
249 249
250 if self._special_values.has_key(i) and key in self._special_values[i]: 250 if self._special_values.has_key(i) and key in self._special_values[i]:
251 self._special_values[i].remove(key) 251 self._special_values[i].remove(key)
252 self._special_values[i].add(newkey) 252 self._special_values[i].add(newkey)
253 253
254 self.delVar(key) 254 self.delVar(key)
255 255
256 def delVar(self,var): 256 def delVar(self, var):
257 self.expand_cache = {} 257 self.expand_cache = {}
258 self.dict[var] = {} 258 self.dict[var] = {}
259 259
260 def setVarFlag(self,var,flag,flagvalue): 260 def setVarFlag(self, var, flag, flagvalue):
261 if not var in self.dict: 261 if not var in self.dict:
262 self._makeShadowCopy(var) 262 self._makeShadowCopy(var)
263 self.dict[var][flag] = flagvalue 263 self.dict[var][flag] = flagvalue
264 264
265 def getVarFlag(self,var,flag): 265 def getVarFlag(self, var, flag):
266 local_var = self._findVar(var) 266 local_var = self._findVar(var)
267 if local_var: 267 if local_var:
268 if flag in local_var: 268 if flag in local_var:
269 return copy.copy(local_var[flag]) 269 return copy.copy(local_var[flag])
270 return None 270 return None
271 271
272 def delVarFlag(self,var,flag): 272 def delVarFlag(self, var, flag):
273 local_var = self._findVar(var) 273 local_var = self._findVar(var)
274 if not local_var: 274 if not local_var:
275 return 275 return
@@ -279,7 +279,7 @@ class DataSmart:
279 if var in self.dict and flag in self.dict[var]: 279 if var in self.dict and flag in self.dict[var]:
280 del self.dict[var][flag] 280 del self.dict[var][flag]
281 281
282 def setVarFlags(self,var,flags): 282 def setVarFlags(self, var, flags):
283 if not var in self.dict: 283 if not var in self.dict:
284 self._makeShadowCopy(var) 284 self._makeShadowCopy(var)
285 285
@@ -288,7 +288,7 @@ class DataSmart:
288 continue 288 continue
289 self.dict[var][i] = flags[i] 289 self.dict[var][i] = flags[i]
290 290
291 def getVarFlags(self,var): 291 def getVarFlags(self, var):
292 local_var = self._findVar(var) 292 local_var = self._findVar(var)
293 flags = {} 293 flags = {}
294 294
@@ -303,7 +303,7 @@ class DataSmart:
303 return flags 303 return flags
304 304
305 305
306 def delVarFlags(self,var): 306 def delVarFlags(self, var):
307 if not var in self.dict: 307 if not var in self.dict:
308 self._makeShadowCopy(var) 308 self._makeShadowCopy(var)
309 309
@@ -333,21 +333,19 @@ class DataSmart:
333 def keys(self): 333 def keys(self):
334 def _keys(d, mykey): 334 def _keys(d, mykey):
335 if "_data" in d: 335 if "_data" in d:
336 _keys(d["_data"],mykey) 336 _keys(d["_data"], mykey)
337 337
338 for key in d.keys(): 338 for key in d.keys():
339 if key != "_data": 339 if key != "_data":
340 mykey[key] = None 340 mykey[key] = None
341 keytab = {} 341 keytab = {}
342 _keys(self.dict,keytab) 342 _keys(self.dict, keytab)
343 return keytab.keys() 343 return keytab.keys()
344 344
345 def __getitem__(self,item): 345 def __getitem__(self, item):
346 #print "Warning deprecated" 346 #print "Warning deprecated"
347 return self.getVar(item, False) 347 return self.getVar(item, False)
348 348
349 def __setitem__(self,var,data): 349 def __setitem__(self, var, data):
350 #print "Warning deprecated" 350 #print "Warning deprecated"
351 self.setVar(var,data) 351 self.setVar(var, data)
352
353
diff --git a/bitbake/lib/bb/event.py b/bitbake/lib/bb/event.py
index aa3a4471d8..456b89caff 100644
--- a/bitbake/lib/bb/event.py
+++ b/bitbake/lib/bb/event.py
@@ -89,9 +89,9 @@ def fire_ui_handlers(event, d):
89def fire(event, d): 89def fire(event, d):
90 """Fire off an Event""" 90 """Fire off an Event"""
91 91
92 # We can fire class handlers in the worker process context and this is 92 # We can fire class handlers in the worker process context and this is
93 # desired so they get the task based datastore. 93 # desired so they get the task based datastore.
94 # UI handlers need to be fired in the server context so we defer this. They 94 # UI handlers need to be fired in the server context so we defer this. They
95 # don't have a datastore so the datastore context isn't a problem. 95 # don't have a datastore so the datastore context isn't a problem.
96 96
97 fire_class_handlers(event, d) 97 fire_class_handlers(event, d)
@@ -297,4 +297,3 @@ class DepTreeGenerated(Event):
297 def __init__(self, depgraph): 297 def __init__(self, depgraph):
298 Event.__init__(self) 298 Event.__init__(self)
299 self._depgraph = depgraph 299 self._depgraph = depgraph
300
diff --git a/bitbake/lib/bb/fetch/__init__.py b/bitbake/lib/bb/fetch/__init__.py
index b1b5eda35b..09c83b0264 100644
--- a/bitbake/lib/bb/fetch/__init__.py
+++ b/bitbake/lib/bb/fetch/__init__.py
@@ -85,7 +85,7 @@ def decodeurl(url):
85 p = {} 85 p = {}
86 if parm: 86 if parm:
87 for s in parm.split(';'): 87 for s in parm.split(';'):
88 s1,s2 = s.split('=') 88 s1, s2 = s.split('=')
89 p[s1] = s2 89 p[s1] = s2
90 90
91 return (type, host, path, user, pswd, p) 91 return (type, host, path, user, pswd, p)
@@ -121,7 +121,7 @@ def uri_replace(uri, uri_find, uri_replace, d):
121 uri_decoded = list(decodeurl(uri)) 121 uri_decoded = list(decodeurl(uri))
122 uri_find_decoded = list(decodeurl(uri_find)) 122 uri_find_decoded = list(decodeurl(uri_find))
123 uri_replace_decoded = list(decodeurl(uri_replace)) 123 uri_replace_decoded = list(decodeurl(uri_replace))
124 result_decoded = ['','','','','',{}] 124 result_decoded = ['', '', '', '', '', {}]
125 for i in uri_find_decoded: 125 for i in uri_find_decoded:
126 loc = uri_find_decoded.index(i) 126 loc = uri_find_decoded.index(i)
127 result_decoded[loc] = uri_decoded[loc] 127 result_decoded[loc] = uri_decoded[loc]
@@ -214,7 +214,7 @@ def init(urls, d, setup = True):
214 if setup: 214 if setup:
215 for url in urldata: 215 for url in urldata:
216 if not urldata[url].setup: 216 if not urldata[url].setup:
217 urldata[url].setup_localpath(d) 217 urldata[url].setup_localpath(d)
218 218
219 urldata_cache[fn] = urldata 219 urldata_cache[fn] = urldata
220 return urldata 220 return urldata
@@ -243,7 +243,7 @@ def go(d, urls = None):
243 continue 243 continue
244 lf = bb.utils.lockfile(ud.lockfile) 244 lf = bb.utils.lockfile(ud.lockfile)
245 if not m.forcefetch(u, ud, d) and os.path.exists(ud.md5): 245 if not m.forcefetch(u, ud, d) and os.path.exists(ud.md5):
246 # If someone else fetched this before we got the lock, 246 # If someone else fetched this before we got the lock,
247 # notice and don't try again 247 # notice and don't try again
248 try: 248 try:
249 os.utime(ud.md5, None) 249 os.utime(ud.md5, None)
@@ -309,7 +309,7 @@ def localpaths(d):
309 urldata = init([], d, True) 309 urldata = init([], d, True)
310 310
311 for u in urldata: 311 for u in urldata:
312 ud = urldata[u] 312 ud = urldata[u]
313 local.append(ud.localpath) 313 local.append(ud.localpath)
314 314
315 return local 315 return local
@@ -321,15 +321,15 @@ def get_srcrev(d):
321 Return the version string for the current package 321 Return the version string for the current package
322 (usually to be used as PV) 322 (usually to be used as PV)
323 Most packages usually only have one SCM so we just pass on the call. 323 Most packages usually only have one SCM so we just pass on the call.
324 In the multi SCM case, we build a value based on SRCREV_FORMAT which must 324 In the multi SCM case, we build a value based on SRCREV_FORMAT which must
325 have been set. 325 have been set.
326 """ 326 """
327 327
328 # 328 #
329 # Ugly code alert. localpath in the fetchers will try to evaluate SRCREV which 329 # Ugly code alert. localpath in the fetchers will try to evaluate SRCREV which
330 # could translate into a call to here. If it does, we need to catch this 330 # could translate into a call to here. If it does, we need to catch this
331 # and provide some way so it knows get_srcrev is active instead of being 331 # and provide some way so it knows get_srcrev is active instead of being
332 # some number etc. hence the srcrev_internal_call tracking and the magic 332 # some number etc. hence the srcrev_internal_call tracking and the magic
333 # "SRCREVINACTION" return value. 333 # "SRCREVINACTION" return value.
334 # 334 #
335 # Neater solutions welcome! 335 # Neater solutions welcome!
@@ -339,7 +339,7 @@ def get_srcrev(d):
339 339
340 scms = [] 340 scms = []
341 341
342 # Only call setup_localpath on URIs which suppports_srcrev() 342 # Only call setup_localpath on URIs which suppports_srcrev()
343 urldata = init(bb.data.getVar('SRC_URI', d, 1).split(), d, False) 343 urldata = init(bb.data.getVar('SRC_URI', d, 1).split(), d, False)
344 for u in urldata: 344 for u in urldata:
345 ud = urldata[u] 345 ud = urldata[u]
@@ -352,7 +352,7 @@ def get_srcrev(d):
352 bb.msg.error(bb.msg.domain.Fetcher, "SRCREV was used yet no valid SCM was found in SRC_URI") 352 bb.msg.error(bb.msg.domain.Fetcher, "SRCREV was used yet no valid SCM was found in SRC_URI")
353 raise ParameterError 353 raise ParameterError
354 354
355 bb.data.setVar('__BB_DONT_CACHE','1', d) 355 bb.data.setVar('__BB_DONT_CACHE', '1', d)
356 356
357 if len(scms) == 1: 357 if len(scms) == 1:
358 return urldata[scms[0]].method.sortable_revision(scms[0], urldata[scms[0]], d) 358 return urldata[scms[0]].method.sortable_revision(scms[0], urldata[scms[0]], d)
@@ -375,7 +375,7 @@ def get_srcrev(d):
375 375
376def localpath(url, d, cache = True): 376def localpath(url, d, cache = True):
377 """ 377 """
378 Called from the parser with cache=False since the cache isn't ready 378 Called from the parser with cache=False since the cache isn't ready
379 at this point. Also called from classed in OE e.g. patch.bbclass 379 at this point. Also called from classed in OE e.g. patch.bbclass
380 """ 380 """
381 ud = init([url], d) 381 ud = init([url], d)
@@ -538,7 +538,7 @@ class Fetch(object):
538 def localpath(self, url, urldata, d): 538 def localpath(self, url, urldata, d):
539 """ 539 """
540 Return the local filename of a given url assuming a successful fetch. 540 Return the local filename of a given url assuming a successful fetch.
541 Can also setup variables in urldata for use in go (saving code duplication 541 Can also setup variables in urldata for use in go (saving code duplication
542 and duplicate code execution) 542 and duplicate code execution)
543 """ 543 """
544 return url 544 return url
@@ -599,8 +599,8 @@ class Fetch(object):
599 """ 599 """
600 Return: 600 Return:
601 a) a source revision if specified 601 a) a source revision if specified
602 b) True if auto srcrev is in action 602 b) True if auto srcrev is in action
603 c) False otherwise 603 c) False otherwise
604 """ 604 """
605 605
606 if 'rev' in ud.parm: 606 if 'rev' in ud.parm:
@@ -632,7 +632,7 @@ class Fetch(object):
632 b) None otherwise 632 b) None otherwise
633 """ 633 """
634 634
635 localcount= None 635 localcount = None
636 if 'name' in ud.parm: 636 if 'name' in ud.parm:
637 pn = data.getVar("PN", d, 1) 637 pn = data.getVar("PN", d, 1)
638 localcount = data.getVar("LOCALCOUNT_" + ud.parm['name'], d, 1) 638 localcount = data.getVar("LOCALCOUNT_" + ud.parm['name'], d, 1)
@@ -685,7 +685,7 @@ class Fetch(object):
685 685
686 def sortable_revision(self, url, ud, d): 686 def sortable_revision(self, url, ud, d):
687 """ 687 """
688 688
689 """ 689 """
690 if hasattr(self, "_sortable_revision"): 690 if hasattr(self, "_sortable_revision"):
691 return self._sortable_revision(url, ud, d) 691 return self._sortable_revision(url, ud, d)
diff --git a/bitbake/lib/bb/fetch/bzr.py b/bitbake/lib/bb/fetch/bzr.py
index c6e33c3343..813d7d8c80 100644
--- a/bitbake/lib/bb/fetch/bzr.py
+++ b/bitbake/lib/bb/fetch/bzr.py
@@ -46,15 +46,15 @@ class Bzr(Fetch):
46 46
47 revision = Fetch.srcrev_internal_helper(ud, d) 47 revision = Fetch.srcrev_internal_helper(ud, d)
48 if revision is True: 48 if revision is True:
49 ud.revision = self.latest_revision(url, ud, d) 49 ud.revision = self.latest_revision(url, ud, d)
50 elif revision: 50 elif revision:
51 ud.revision = revision 51 ud.revision = revision
52 52
53 if not ud.revision: 53 if not ud.revision:
54 ud.revision = self.latest_revision(url, ud, d) 54 ud.revision = self.latest_revision(url, ud, d)
55 55
56 ud.localfile = data.expand('bzr_%s_%s_%s.tar.gz' % (ud.host, ud.path.replace('/', '.'), ud.revision), d) 56 ud.localfile = data.expand('bzr_%s_%s_%s.tar.gz' % (ud.host, ud.path.replace('/', '.'), ud.revision), d)
57 57
58 return os.path.join(data.getVar("DL_DIR", d, True), ud.localfile) 58 return os.path.join(data.getVar("DL_DIR", d, True), ud.localfile)
59 59
60 def _buildbzrcommand(self, ud, d, command): 60 def _buildbzrcommand(self, ud, d, command):
@@ -145,4 +145,3 @@ class Bzr(Fetch):
145 145
146 def _build_revision(self, url, ud, d): 146 def _build_revision(self, url, ud, d):
147 return ud.revision 147 return ud.revision
148
diff --git a/bitbake/lib/bb/fetch/cvs.py b/bitbake/lib/bb/fetch/cvs.py
index 443f521317..c0d43618f9 100644
--- a/bitbake/lib/bb/fetch/cvs.py
+++ b/bitbake/lib/bb/fetch/cvs.py
@@ -157,7 +157,7 @@ class Cvs(Fetch):
157 try: 157 try:
158 os.rmdir(moddir) 158 os.rmdir(moddir)
159 except OSError: 159 except OSError:
160 pass 160 pass
161 raise FetchError(ud.module) 161 raise FetchError(ud.module)
162 162
163 # tar them up to a defined filename 163 # tar them up to a defined filename
diff --git a/bitbake/lib/bb/fetch/git.py b/bitbake/lib/bb/fetch/git.py
index 41ebc5b998..5332686252 100644
--- a/bitbake/lib/bb/fetch/git.py
+++ b/bitbake/lib/bb/fetch/git.py
@@ -57,12 +57,12 @@ class Git(Fetch):
57 57
58 tag = Fetch.srcrev_internal_helper(ud, d) 58 tag = Fetch.srcrev_internal_helper(ud, d)
59 if tag is True: 59 if tag is True:
60 ud.tag = self.latest_revision(url, ud, d) 60 ud.tag = self.latest_revision(url, ud, d)
61 elif tag: 61 elif tag:
62 ud.tag = tag 62 ud.tag = tag
63 63
64 if not ud.tag or ud.tag == "master": 64 if not ud.tag or ud.tag == "master":
65 ud.tag = self.latest_revision(url, ud, d) 65 ud.tag = self.latest_revision(url, ud, d)
66 66
67 subdir = ud.parm.get("subpath", "") 67 subdir = ud.parm.get("subpath", "")
68 if subdir != "": 68 if subdir != "":
@@ -114,7 +114,7 @@ class Git(Fetch):
114 114
115 os.chdir(ud.clonedir) 115 os.chdir(ud.clonedir)
116 mirror_tarballs = data.getVar("BB_GENERATE_MIRROR_TARBALLS", d, True) 116 mirror_tarballs = data.getVar("BB_GENERATE_MIRROR_TARBALLS", d, True)
117 if mirror_tarballs != "0" or 'fullclone' in ud.parm: 117 if mirror_tarballs != "0" or 'fullclone' in ud.parm:
118 bb.msg.note(1, bb.msg.domain.Fetcher, "Creating tarball of git repository") 118 bb.msg.note(1, bb.msg.domain.Fetcher, "Creating tarball of git repository")
119 runfetchcmd("tar -czf %s %s" % (repofile, os.path.join(".", ".git", "*") ), d) 119 runfetchcmd("tar -czf %s %s" % (repofile, os.path.join(".", ".git", "*") ), d)
120 120
@@ -188,7 +188,7 @@ class Git(Fetch):
188 188
189 def _sortable_buildindex_disabled(self, url, ud, d, rev): 189 def _sortable_buildindex_disabled(self, url, ud, d, rev):
190 """ 190 """
191 Return a suitable buildindex for the revision specified. This is done by counting revisions 191 Return a suitable buildindex for the revision specified. This is done by counting revisions
192 using "git rev-list" which may or may not work in different circumstances. 192 using "git rev-list" which may or may not work in different circumstances.
193 """ 193 """
194 194
@@ -213,5 +213,4 @@ class Git(Fetch):
213 213
214 buildindex = "%s" % output.split()[0] 214 buildindex = "%s" % output.split()[0]
215 bb.msg.debug(1, bb.msg.domain.Fetcher, "GIT repository for %s in %s is returning %s revisions in rev-list before %s" % (url, ud.clonedir, buildindex, rev)) 215 bb.msg.debug(1, bb.msg.domain.Fetcher, "GIT repository for %s in %s is returning %s revisions in rev-list before %s" % (url, ud.clonedir, buildindex, rev))
216 return buildindex 216 return buildindex
217
diff --git a/bitbake/lib/bb/fetch/hg.py b/bitbake/lib/bb/fetch/hg.py
index d0756382f8..efb3b5c76d 100644
--- a/bitbake/lib/bb/fetch/hg.py
+++ b/bitbake/lib/bb/fetch/hg.py
@@ -134,9 +134,9 @@ class Hg(Fetch):
134 os.chdir(ud.pkgdir) 134 os.chdir(ud.pkgdir)
135 bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % fetchcmd) 135 bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % fetchcmd)
136 runfetchcmd(fetchcmd, d) 136 runfetchcmd(fetchcmd, d)
137 137
138 # Even when we clone (fetch), we still need to update as hg's clone 138 # Even when we clone (fetch), we still need to update as hg's clone
139 # won't checkout the specified revision if its on a branch 139 # won't checkout the specified revision if its on a branch
140 updatecmd = self._buildhgcommand(ud, d, "update") 140 updatecmd = self._buildhgcommand(ud, d, "update")
141 bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % updatecmd) 141 bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % updatecmd)
142 runfetchcmd(updatecmd, d) 142 runfetchcmd(updatecmd, d)
@@ -170,4 +170,3 @@ class Hg(Fetch):
170 Return a unique key for the url 170 Return a unique key for the url
171 """ 171 """
172 return "hg:" + ud.moddir 172 return "hg:" + ud.moddir
173
diff --git a/bitbake/lib/bb/fetch/local.py b/bitbake/lib/bb/fetch/local.py
index f9bdf589cb..a2abc8639c 100644
--- a/bitbake/lib/bb/fetch/local.py
+++ b/bitbake/lib/bb/fetch/local.py
@@ -65,8 +65,8 @@ class Local(Fetch):
65 Check the status of the url 65 Check the status of the url
66 """ 66 """
67 if urldata.localpath.find("*") != -1: 67 if urldata.localpath.find("*") != -1:
68 bb.msg.note(1, bb.msg.domain.Fetcher, "URL %s looks like a glob and was therefore not checked." % url) 68 bb.msg.note(1, bb.msg.domain.Fetcher, "URL %s looks like a glob and was therefore not checked." % url)
69 return True 69 return True
70 if os.path.exists(urldata.localpath): 70 if os.path.exists(urldata.localpath):
71 return True 71 return True
72 return False 72 return False
diff --git a/bitbake/lib/bb/fetch/osc.py b/bitbake/lib/bb/fetch/osc.py
index 548dd9d074..ed773939b0 100644
--- a/bitbake/lib/bb/fetch/osc.py
+++ b/bitbake/lib/bb/fetch/osc.py
@@ -16,7 +16,7 @@ from bb.fetch import MissingParameterError
16from bb.fetch import runfetchcmd 16from bb.fetch import runfetchcmd
17 17
18class Osc(Fetch): 18class Osc(Fetch):
19 """Class to fetch a module or modules from Opensuse build server 19 """Class to fetch a module or modules from Opensuse build server
20 repositories.""" 20 repositories."""
21 21
22 def supports(self, url, ud, d): 22 def supports(self, url, ud, d):
@@ -64,7 +64,7 @@ class Osc(Fetch):
64 proto = "ocs" 64 proto = "ocs"
65 if "proto" in ud.parm: 65 if "proto" in ud.parm:
66 proto = ud.parm["proto"] 66 proto = ud.parm["proto"]
67 67
68 options = [] 68 options = []
69 69
70 config = "-c %s" % self.generate_config(ud, d) 70 config = "-c %s" % self.generate_config(ud, d)
@@ -108,7 +108,7 @@ class Osc(Fetch):
108 os.chdir(ud.pkgdir) 108 os.chdir(ud.pkgdir)
109 bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % oscfetchcmd) 109 bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % oscfetchcmd)
110 runfetchcmd(oscfetchcmd, d) 110 runfetchcmd(oscfetchcmd, d)
111 111
112 os.chdir(os.path.join(ud.pkgdir + ud.path)) 112 os.chdir(os.path.join(ud.pkgdir + ud.path))
113 # tar them up to a defined filename 113 # tar them up to a defined filename
114 try: 114 try:
@@ -131,7 +131,7 @@ class Osc(Fetch):
131 131
132 config_path = "%s/oscrc" % data.expand('${OSCDIR}', d) 132 config_path = "%s/oscrc" % data.expand('${OSCDIR}', d)
133 if (os.path.exists(config_path)): 133 if (os.path.exists(config_path)):
134 os.remove(config_path) 134 os.remove(config_path)
135 135
136 f = open(config_path, 'w') 136 f = open(config_path, 'w')
137 f.write("[general]\n") 137 f.write("[general]\n")
@@ -146,5 +146,5 @@ class Osc(Fetch):
146 f.write("user = %s\n" % ud.parm["user"]) 146 f.write("user = %s\n" % ud.parm["user"])
147 f.write("pass = %s\n" % ud.parm["pswd"]) 147 f.write("pass = %s\n" % ud.parm["pswd"])
148 f.close() 148 f.close()
149 149
150 return config_path 150 return config_path
diff --git a/bitbake/lib/bb/fetch/perforce.py b/bitbake/lib/bb/fetch/perforce.py
index 8bc3205c2a..67de6f59fa 100644
--- a/bitbake/lib/bb/fetch/perforce.py
+++ b/bitbake/lib/bb/fetch/perforce.py
@@ -95,7 +95,7 @@ class Perforce(Fetch):
95 return cset.split(' ')[1] 95 return cset.split(' ')[1]
96 getcset = staticmethod(getcset) 96 getcset = staticmethod(getcset)
97 97
98 def localpath(self, url, ud, d): 98 def localpath(self, url, ud, d):
99 99
100 (host,path,user,pswd,parm) = Perforce.doparse(url,d) 100 (host,path,user,pswd,parm) = Perforce.doparse(url,d)
101 101
@@ -180,7 +180,7 @@ class Perforce(Fetch):
180 180
181 count = 0 181 count = 0
182 182
183 for file in p4file: 183 for file in p4file:
184 list = file.split() 184 list = file.split()
185 185
186 if list[2] == "delete": 186 if list[2] == "delete":
@@ -191,7 +191,7 @@ class Perforce(Fetch):
191 191
192 os.system("%s%s print -o %s/%s %s" % (p4cmd, p4opt, module,dest[:where],list[0])) 192 os.system("%s%s print -o %s/%s %s" % (p4cmd, p4opt, module,dest[:where],list[0]))
193 count = count + 1 193 count = count + 1
194 194
195 if count == 0: 195 if count == 0:
196 bb.msg.error(bb.msg.domain.Fetcher, "Fetch: No files gathered from the P4 fetch") 196 bb.msg.error(bb.msg.domain.Fetcher, "Fetch: No files gathered from the P4 fetch")
197 raise FetchError(module) 197 raise FetchError(module)
@@ -205,5 +205,3 @@ class Perforce(Fetch):
205 raise FetchError(module) 205 raise FetchError(module)
206 # cleanup 206 # cleanup
207 os.system('rm -rf %s' % tmpfile) 207 os.system('rm -rf %s' % tmpfile)
208
209
diff --git a/bitbake/lib/bb/fetch/svn.py b/bitbake/lib/bb/fetch/svn.py
index ba9f6ab109..375e8df055 100644
--- a/bitbake/lib/bb/fetch/svn.py
+++ b/bitbake/lib/bb/fetch/svn.py
@@ -78,7 +78,7 @@ class Svn(Fetch):
78 ud.revision = rev 78 ud.revision = rev
79 ud.date = "" 79 ud.date = ""
80 else: 80 else:
81 ud.revision = "" 81 ud.revision = ""
82 82
83 ud.localfile = data.expand('%s_%s_%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.path.replace('/', '.'), ud.revision, ud.date), d) 83 ud.localfile = data.expand('%s_%s_%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.path.replace('/', '.'), ud.revision, ud.date), d)
84 84
diff --git a/bitbake/lib/bb/msg.py b/bitbake/lib/bb/msg.py
index b4d3a2c985..17d1a0852b 100644
--- a/bitbake/lib/bb/msg.py
+++ b/bitbake/lib/bb/msg.py
@@ -33,7 +33,7 @@ def _NamedTuple(name, fields):
33 Tuple = collections.namedtuple(name, " ".join(fields)) 33 Tuple = collections.namedtuple(name, " ".join(fields))
34 return Tuple(*range(len(fields))) 34 return Tuple(*range(len(fields)))
35 35
36domain = _NamedTuple("Domain",( 36domain = _NamedTuple("Domain", (
37 "Default", 37 "Default",
38 "Build", 38 "Build",
39 "Cache", 39 "Cache",
diff --git a/bitbake/lib/bb/parse/ast.py b/bitbake/lib/bb/parse/ast.py
index affe4bed4d..6d4f285626 100644
--- a/bitbake/lib/bb/parse/ast.py
+++ b/bitbake/lib/bb/parse/ast.py
@@ -254,7 +254,7 @@ class InheritNode(AstNode):
254 254
255 def eval(self, data): 255 def eval(self, data):
256 bb.parse.BBHandler.inherit(self.n, data) 256 bb.parse.BBHandler.inherit(self.n, data)
257 257
258def handleInclude(statements, m, fn, lineno, force): 258def handleInclude(statements, m, fn, lineno, force):
259 statements.append(IncludeNode(m.group(1), fn, lineno, force)) 259 statements.append(IncludeNode(m.group(1), fn, lineno, force))
260 260
diff --git a/bitbake/lib/bb/parse/parse_py/BBHandler.py b/bitbake/lib/bb/parse/parse_py/BBHandler.py
index 262c883c95..f9f185ff71 100644
--- a/bitbake/lib/bb/parse/parse_py/BBHandler.py
+++ b/bitbake/lib/bb/parse/parse_py/BBHandler.py
@@ -11,7 +11,7 @@
11 11
12# Copyright (C) 2003, 2004 Chris Larson 12# Copyright (C) 2003, 2004 Chris Larson
13# Copyright (C) 2003, 2004 Phil Blundell 13# Copyright (C) 2003, 2004 Phil Blundell
14# 14#
15# This program is free software; you can redistribute it and/or modify 15# This program is free software; you can redistribute it and/or modify
16# it under the terms of the GNU General Public License version 2 as 16# it under the terms of the GNU General Public License version 2 as
17# published by the Free Software Foundation. 17# published by the Free Software Foundation.
diff --git a/bitbake/lib/bb/parse/parse_py/ConfHandler.py b/bitbake/lib/bb/parse/parse_py/ConfHandler.py
index 5c02299524..e50acbe5e1 100644
--- a/bitbake/lib/bb/parse/parse_py/ConfHandler.py
+++ b/bitbake/lib/bb/parse/parse_py/ConfHandler.py
@@ -10,7 +10,7 @@
10 10
11# Copyright (C) 2003, 2004 Chris Larson 11# Copyright (C) 2003, 2004 Chris Larson
12# Copyright (C) 2003, 2004 Phil Blundell 12# Copyright (C) 2003, 2004 Phil Blundell
13# 13#
14# This program is free software; you can redistribute it and/or modify 14# This program is free software; you can redistribute it and/or modify
15# it under the terms of the GNU General Public License version 2 as 15# it under the terms of the GNU General Public License version 2 as
16# published by the Free Software Foundation. 16# published by the Free Software Foundation.
diff --git a/bitbake/lib/bb/persist_data.py b/bitbake/lib/bb/persist_data.py
index bc4045fe85..e2bbbe54f7 100644
--- a/bitbake/lib/bb/persist_data.py
+++ b/bitbake/lib/bb/persist_data.py
@@ -33,11 +33,11 @@ class PersistData:
33 """ 33 """
34 BitBake Persistent Data Store 34 BitBake Persistent Data Store
35 35
36 Used to store data in a central location such that other threads/tasks can 36 Used to store data in a central location such that other threads/tasks can
37 access them at some future date. 37 access them at some future date.
38 38
39 The "domain" is used as a key to isolate each data pool and in this 39 The "domain" is used as a key to isolate each data pool and in this
40 implementation corresponds to an SQL table. The SQL table consists of a 40 implementation corresponds to an SQL table. The SQL table consists of a
41 simple key and value pair. 41 simple key and value pair.
42 42
43 Why sqlite? It handles all the locking issues for us. 43 Why sqlite? It handles all the locking issues for us.
@@ -78,7 +78,7 @@ class PersistData:
78 for row in data: 78 for row in data:
79 ret[str(row[0])] = str(row[1]) 79 ret[str(row[0])] = str(row[1])
80 80
81 return ret 81 return ret
82 82
83 def getValue(self, domain, key): 83 def getValue(self, domain, key):
84 """ 84 """
@@ -108,7 +108,7 @@ class PersistData:
108 self._execute("DELETE from %s where key=?;" % domain, [key]) 108 self._execute("DELETE from %s where key=?;" % domain, [key])
109 109
110 def _execute(self, *query): 110 def _execute(self, *query):
111 while True: 111 while True:
112 try: 112 try:
113 self.connection.execute(*query) 113 self.connection.execute(*query)
114 return 114 return
@@ -116,6 +116,3 @@ class PersistData:
116 if 'database is locked' in str(e): 116 if 'database is locked' in str(e):
117 continue 117 continue
118 raise 118 raise
119
120
121
diff --git a/bitbake/lib/bb/providers.py b/bitbake/lib/bb/providers.py
index 058996ba57..58326f0398 100644
--- a/bitbake/lib/bb/providers.py
+++ b/bitbake/lib/bb/providers.py
@@ -62,7 +62,7 @@ def sortPriorities(pn, dataCache, pkg_pn = None):
62def preferredVersionMatch(pe, pv, pr, preferred_e, preferred_v, preferred_r): 62def preferredVersionMatch(pe, pv, pr, preferred_e, preferred_v, preferred_r):
63 """ 63 """
64 Check if the version pe,pv,pr is the preferred one. 64 Check if the version pe,pv,pr is the preferred one.
65 If there is preferred version defined and ends with '%', then pv has to start with that version after removing the '%' 65 If there is preferred version defined and ends with '%', then pv has to start with that version after removing the '%'
66 """ 66 """
67 if (pr == preferred_r or preferred_r == None): 67 if (pr == preferred_r or preferred_r == None):
68 if (pe == preferred_e or preferred_e == None): 68 if (pe == preferred_e or preferred_e == None):
@@ -103,7 +103,7 @@ def findPreferredProvider(pn, cfgData, dataCache, pkg_pn = None, item = None):
103 103
104 for file_set in pkg_pn: 104 for file_set in pkg_pn:
105 for f in file_set: 105 for f in file_set:
106 pe,pv,pr = dataCache.pkg_pepvpr[f] 106 pe, pv, pr = dataCache.pkg_pepvpr[f]
107 if preferredVersionMatch(pe, pv, pr, preferred_e, preferred_v, preferred_r): 107 if preferredVersionMatch(pe, pv, pr, preferred_e, preferred_v, preferred_r):
108 preferred_file = f 108 preferred_file = f
109 preferred_ver = (pe, pv, pr) 109 preferred_ver = (pe, pv, pr)
@@ -136,7 +136,7 @@ def findLatestProvider(pn, cfgData, dataCache, file_set):
136 latest_p = 0 136 latest_p = 0
137 latest_f = None 137 latest_f = None
138 for file_name in file_set: 138 for file_name in file_set:
139 pe,pv,pr = dataCache.pkg_pepvpr[file_name] 139 pe, pv, pr = dataCache.pkg_pepvpr[file_name]
140 dp = dataCache.pkg_dp[file_name] 140 dp = dataCache.pkg_dp[file_name]
141 141
142 if (latest is None) or ((latest_p == dp) and (utils.vercmp(latest, (pe, pv, pr)) < 0)) or (dp > latest_p): 142 if (latest is None) or ((latest_p == dp) and (utils.vercmp(latest, (pe, pv, pr)) < 0)) or (dp > latest_p):
@@ -169,14 +169,14 @@ def findBestProvider(pn, cfgData, dataCache, pkg_pn = None, item = None):
169 169
170def _filterProviders(providers, item, cfgData, dataCache): 170def _filterProviders(providers, item, cfgData, dataCache):
171 """ 171 """
172 Take a list of providers and filter/reorder according to the 172 Take a list of providers and filter/reorder according to the
173 environment variables and previous build results 173 environment variables and previous build results
174 """ 174 """
175 eligible = [] 175 eligible = []
176 preferred_versions = {} 176 preferred_versions = {}
177 sortpkg_pn = {} 177 sortpkg_pn = {}
178 178
179 # The order of providers depends on the order of the files on the disk 179 # The order of providers depends on the order of the files on the disk
180 # up to here. Sort pkg_pn to make dependency issues reproducible rather 180 # up to here. Sort pkg_pn to make dependency issues reproducible rather
181 # than effectively random. 181 # than effectively random.
182 providers.sort() 182 providers.sort()
@@ -226,7 +226,7 @@ def _filterProviders(providers, item, cfgData, dataCache):
226 226
227def filterProviders(providers, item, cfgData, dataCache): 227def filterProviders(providers, item, cfgData, dataCache):
228 """ 228 """
229 Take a list of providers and filter/reorder according to the 229 Take a list of providers and filter/reorder according to the
230 environment variables and previous build results 230 environment variables and previous build results
231 Takes a "normal" target item 231 Takes a "normal" target item
232 """ 232 """
@@ -254,7 +254,7 @@ def filterProviders(providers, item, cfgData, dataCache):
254 254
255def filterProvidersRunTime(providers, item, cfgData, dataCache): 255def filterProvidersRunTime(providers, item, cfgData, dataCache):
256 """ 256 """
257 Take a list of providers and filter/reorder according to the 257 Take a list of providers and filter/reorder according to the
258 environment variables and previous build results 258 environment variables and previous build results
259 Takes a "runtime" target item 259 Takes a "runtime" target item
260 """ 260 """
@@ -297,7 +297,7 @@ def getRuntimeProviders(dataCache, rdepend):
297 rproviders = [] 297 rproviders = []
298 298
299 if rdepend in dataCache.rproviders: 299 if rdepend in dataCache.rproviders:
300 rproviders += dataCache.rproviders[rdepend] 300 rproviders += dataCache.rproviders[rdepend]
301 301
302 if rdepend in dataCache.packages: 302 if rdepend in dataCache.packages:
303 rproviders += dataCache.packages[rdepend] 303 rproviders += dataCache.packages[rdepend]
diff --git a/bitbake/lib/bb/runqueue.py b/bitbake/lib/bb/runqueue.py
index 9a368b8622..2ecfd09469 100644
--- a/bitbake/lib/bb/runqueue.py
+++ b/bitbake/lib/bb/runqueue.py
@@ -30,7 +30,7 @@ import fcntl
30 30
31class TaskFailure(Exception): 31class TaskFailure(Exception):
32 """Exception raised when a task in a runqueue fails""" 32 """Exception raised when a task in a runqueue fails"""
33 def __init__(self, x): 33 def __init__(self, x):
34 self.args = x 34 self.args = x
35 35
36 36
@@ -60,7 +60,7 @@ class RunQueueStats:
60 def taskActive(self): 60 def taskActive(self):
61 self.active = self.active + 1 61 self.active = self.active + 1
62 62
63# These values indicate the next step due to be run in the 63# These values indicate the next step due to be run in the
64# runQueue state machine 64# runQueue state machine
65runQueuePrepare = 2 65runQueuePrepare = 2
66runQueueRunInit = 3 66runQueueRunInit = 3
@@ -76,7 +76,7 @@ class RunQueueScheduler:
76 """ 76 """
77 def __init__(self, runqueue): 77 def __init__(self, runqueue):
78 """ 78 """
79 The default scheduler just returns the first buildable task (the 79 The default scheduler just returns the first buildable task (the
80 priority map is sorted by task numer) 80 priority map is sorted by task numer)
81 """ 81 """
82 self.rq = runqueue 82 self.rq = runqueue
@@ -123,10 +123,10 @@ class RunQueueSchedulerSpeed(RunQueueScheduler):
123 123
124class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed): 124class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed):
125 """ 125 """
126 A scheduler optimised to complete .bb files are quickly as possible. The 126 A scheduler optimised to complete .bb files are quickly as possible. The
127 priority map is sorted by task weight, but then reordered so once a given 127 priority map is sorted by task weight, but then reordered so once a given
128 .bb file starts to build, its completed as quickly as possible. This works 128 .bb file starts to build, its completed as quickly as possible. This works
129 well where disk space is at a premium and classes like OE's rm_work are in 129 well where disk space is at a premium and classes like OE's rm_work are in
130 force. 130 force.
131 """ 131 """
132 def __init__(self, runqueue): 132 def __init__(self, runqueue):
@@ -135,7 +135,7 @@ class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed):
135 135
136 #FIXME - whilst this groups all fnids together it does not reorder the 136 #FIXME - whilst this groups all fnids together it does not reorder the
137 #fnid groups optimally. 137 #fnid groups optimally.
138 138
139 basemap = deepcopy(self.prio_map) 139 basemap = deepcopy(self.prio_map)
140 self.prio_map = [] 140 self.prio_map = []
141 while (len(basemap) > 0): 141 while (len(basemap) > 0):
@@ -231,7 +231,7 @@ class RunQueue:
231 if chain1[index] != chain2[index]: 231 if chain1[index] != chain2[index]:
232 return False 232 return False
233 return True 233 return True
234 234
235 def chain_array_contains(chain, chain_array): 235 def chain_array_contains(chain, chain_array):
236 """ 236 """
237 Return True if chain_array contains chain 237 Return True if chain_array contains chain
@@ -286,7 +286,7 @@ class RunQueue:
286 286
287 def calculate_task_weights(self, endpoints): 287 def calculate_task_weights(self, endpoints):
288 """ 288 """
289 Calculate a number representing the "weight" of each task. Heavier weighted tasks 289 Calculate a number representing the "weight" of each task. Heavier weighted tasks
290 have more dependencies and hence should be executed sooner for maximum speed. 290 have more dependencies and hence should be executed sooner for maximum speed.
291 291
292 This function also sanity checks the task list finding tasks that its not 292 This function also sanity checks the task list finding tasks that its not
@@ -318,7 +318,7 @@ class RunQueue:
318 task_done[revdep] = True 318 task_done[revdep] = True
319 endpoints = next_points 319 endpoints = next_points
320 if len(next_points) == 0: 320 if len(next_points) == 0:
321 break 321 break
322 322
323 # Circular dependency sanity check 323 # Circular dependency sanity check
324 problem_tasks = [] 324 problem_tasks = []
@@ -345,7 +345,7 @@ class RunQueue:
345 345
346 def prepare_runqueue(self): 346 def prepare_runqueue(self):
347 """ 347 """
348 Turn a set of taskData into a RunQueue and compute data needed 348 Turn a set of taskData into a RunQueue and compute data needed
349 to optimise the execution order. 349 to optimise the execution order.
350 """ 350 """
351 351
@@ -365,12 +365,12 @@ class RunQueue:
365 # Step A - Work out a list of tasks to run 365 # Step A - Work out a list of tasks to run
366 # 366 #
367 # Taskdata gives us a list of possible providers for every build and run 367 # Taskdata gives us a list of possible providers for every build and run
368 # target ordered by priority. It also gives information on each of those 368 # target ordered by priority. It also gives information on each of those
369 # providers. 369 # providers.
370 # 370 #
371 # To create the actual list of tasks to execute we fix the list of 371 # To create the actual list of tasks to execute we fix the list of
372 # providers and then resolve the dependencies into task IDs. This 372 # providers and then resolve the dependencies into task IDs. This
373 # process is repeated for each type of dependency (tdepends, deptask, 373 # process is repeated for each type of dependency (tdepends, deptask,
374 # rdeptast, recrdeptask, idepends). 374 # rdeptast, recrdeptask, idepends).
375 375
376 def add_build_dependencies(depids, tasknames, depends): 376 def add_build_dependencies(depids, tasknames, depends):
@@ -411,12 +411,12 @@ class RunQueue:
411 411
412 if fnid not in taskData.failed_fnids: 412 if fnid not in taskData.failed_fnids:
413 413
414 # Resolve task internal dependencies 414 # Resolve task internal dependencies
415 # 415 #
416 # e.g. addtask before X after Y 416 # e.g. addtask before X after Y
417 depends = taskData.tasks_tdepends[task] 417 depends = taskData.tasks_tdepends[task]
418 418
419 # Resolve 'deptask' dependencies 419 # Resolve 'deptask' dependencies
420 # 420 #
421 # e.g. do_sometask[deptask] = "do_someothertask" 421 # e.g. do_sometask[deptask] = "do_someothertask"
422 # (makes sure sometask runs after someothertask of all DEPENDS) 422 # (makes sure sometask runs after someothertask of all DEPENDS)
@@ -424,7 +424,7 @@ class RunQueue:
424 tasknames = task_deps['deptask'][taskData.tasks_name[task]].split() 424 tasknames = task_deps['deptask'][taskData.tasks_name[task]].split()
425 add_build_dependencies(taskData.depids[fnid], tasknames, depends) 425 add_build_dependencies(taskData.depids[fnid], tasknames, depends)
426 426
427 # Resolve 'rdeptask' dependencies 427 # Resolve 'rdeptask' dependencies
428 # 428 #
429 # e.g. do_sometask[rdeptask] = "do_someothertask" 429 # e.g. do_sometask[rdeptask] = "do_someothertask"
430 # (makes sure sometask runs after someothertask of all RDEPENDS) 430 # (makes sure sometask runs after someothertask of all RDEPENDS)
@@ -432,7 +432,7 @@ class RunQueue:
432 taskname = task_deps['rdeptask'][taskData.tasks_name[task]] 432 taskname = task_deps['rdeptask'][taskData.tasks_name[task]]
433 add_runtime_dependencies(taskData.rdepids[fnid], [taskname], depends) 433 add_runtime_dependencies(taskData.rdepids[fnid], [taskname], depends)
434 434
435 # Resolve inter-task dependencies 435 # Resolve inter-task dependencies
436 # 436 #
437 # e.g. do_sometask[depends] = "targetname:do_someothertask" 437 # e.g. do_sometask[depends] = "targetname:do_someothertask"
438 # (makes sure sometask runs after targetname's someothertask) 438 # (makes sure sometask runs after targetname's someothertask)
@@ -467,8 +467,8 @@ class RunQueue:
467 newdep = [] 467 newdep = []
468 bb.msg.debug(2, bb.msg.domain.RunQueue, "Task %s (%s %s) contains self reference! %s" % (task, taskData.fn_index[taskData.tasks_fnid[task]], taskData.tasks_name[task], depends)) 468 bb.msg.debug(2, bb.msg.domain.RunQueue, "Task %s (%s %s) contains self reference! %s" % (task, taskData.fn_index[taskData.tasks_fnid[task]], taskData.tasks_name[task], depends))
469 for dep in depends: 469 for dep in depends:
470 if task != dep: 470 if task != dep:
471 newdep.append(dep) 471 newdep.append(dep)
472 depends = newdep 472 depends = newdep
473 473
474 self.runq_fnid.append(taskData.tasks_fnid[task]) 474 self.runq_fnid.append(taskData.tasks_fnid[task])
@@ -482,7 +482,7 @@ class RunQueue:
482 # 482 #
483 # Build a list of recursive cumulative dependencies for each fnid 483 # Build a list of recursive cumulative dependencies for each fnid
484 # We do this by fnid, since if A depends on some task in B 484 # We do this by fnid, since if A depends on some task in B
485 # we're interested in later tasks B's fnid might have but B itself 485 # we're interested in later tasks B's fnid might have but B itself
486 # doesn't depend on 486 # doesn't depend on
487 # 487 #
488 # Algorithm is O(tasks) + O(tasks)*O(fnids) 488 # Algorithm is O(tasks) + O(tasks)*O(fnids)
@@ -513,7 +513,7 @@ class RunQueue:
513 if len(runq_recrdepends[task]) > 0: 513 if len(runq_recrdepends[task]) > 0:
514 taskfnid = self.runq_fnid[task] 514 taskfnid = self.runq_fnid[task]
515 for dep in reccumdepends[taskfnid]: 515 for dep in reccumdepends[taskfnid]:
516 # Ignore self references 516 # Ignore self references
517 if dep == task: 517 if dep == task:
518 continue 518 continue
519 for taskname in runq_recrdepends[task]: 519 for taskname in runq_recrdepends[task]:
@@ -635,7 +635,7 @@ class RunQueue:
635 635
636 bb.msg.note(2, bb.msg.domain.RunQueue, "Compute totals (have %s endpoint(s))" % len(endpoints)) 636 bb.msg.note(2, bb.msg.domain.RunQueue, "Compute totals (have %s endpoint(s))" % len(endpoints))
637 637
638 # Calculate task weights 638 # Calculate task weights
639 # Check of higher length circular dependencies 639 # Check of higher length circular dependencies
640 self.runq_weight = self.calculate_task_weights(endpoints) 640 self.runq_weight = self.calculate_task_weights(endpoints)
641 641
@@ -657,7 +657,7 @@ class RunQueue:
657 for prov in self.dataCache.fn_provides[fn]: 657 for prov in self.dataCache.fn_provides[fn]:
658 if prov not in prov_list: 658 if prov not in prov_list:
659 prov_list[prov] = [fn] 659 prov_list[prov] = [fn]
660 elif fn not in prov_list[prov]: 660 elif fn not in prov_list[prov]:
661 prov_list[prov].append(fn) 661 prov_list[prov].append(fn)
662 error = False 662 error = False
663 for prov in prov_list: 663 for prov in prov_list:
@@ -703,7 +703,7 @@ class RunQueue:
703 buildable.append(task) 703 buildable.append(task)
704 704
705 def check_buildable(self, task, buildable): 705 def check_buildable(self, task, buildable):
706 for revdep in self.runq_revdeps[task]: 706 for revdep in self.runq_revdeps[task]:
707 alldeps = 1 707 alldeps = 1
708 for dep in self.runq_depends[revdep]: 708 for dep in self.runq_depends[revdep]:
709 if dep in unchecked: 709 if dep in unchecked:
@@ -811,10 +811,10 @@ class RunQueue:
811 try: 811 try:
812 t2 = os.stat(stampfile2)[stat.ST_MTIME] 812 t2 = os.stat(stampfile2)[stat.ST_MTIME]
813 if t1 < t2: 813 if t1 < t2:
814 bb.msg.debug(2, bb.msg.domain.RunQueue, "Stampfile %s < %s" % (stampfile,stampfile2)) 814 bb.msg.debug(2, bb.msg.domain.RunQueue, "Stampfile %s < %s" % (stampfile, stampfile2))
815 iscurrent = False 815 iscurrent = False
816 except: 816 except:
817 bb.msg.debug(2, bb.msg.domain.RunQueue, "Exception reading %s for %s" % (stampfile2 ,stampfile)) 817 bb.msg.debug(2, bb.msg.domain.RunQueue, "Exception reading %s for %s" % (stampfile2 , stampfile))
818 iscurrent = False 818 iscurrent = False
819 819
820 return iscurrent 820 return iscurrent
@@ -885,7 +885,7 @@ class RunQueue:
885 def task_complete(self, task): 885 def task_complete(self, task):
886 """ 886 """
887 Mark a task as completed 887 Mark a task as completed
888 Look at the reverse dependencies and mark any task with 888 Look at the reverse dependencies and mark any task with
889 completed dependencies as buildable 889 completed dependencies as buildable
890 """ 890 """
891 self.runq_complete[task] = 1 891 self.runq_complete[task] = 1
@@ -1033,10 +1033,10 @@ class RunQueue:
1033 def finish_runqueue_now(self): 1033 def finish_runqueue_now(self):
1034 bb.msg.note(1, bb.msg.domain.RunQueue, "Sending SIGINT to remaining %s tasks" % self.stats.active) 1034 bb.msg.note(1, bb.msg.domain.RunQueue, "Sending SIGINT to remaining %s tasks" % self.stats.active)
1035 for k, v in self.build_pids.iteritems(): 1035 for k, v in self.build_pids.iteritems():
1036 try: 1036 try:
1037 os.kill(-k, signal.SIGINT) 1037 os.kill(-k, signal.SIGINT)
1038 except: 1038 except:
1039 pass 1039 pass
1040 for pipe in self.build_pipes: 1040 for pipe in self.build_pipes:
1041 self.build_pipes[pipe].read() 1041 self.build_pipes[pipe].read()
1042 1042
@@ -1085,30 +1085,30 @@ class RunQueue:
1085 """ 1085 """
1086 bb.msg.debug(3, bb.msg.domain.RunQueue, "run_tasks:") 1086 bb.msg.debug(3, bb.msg.domain.RunQueue, "run_tasks:")
1087 for task in range(len(self.runq_task)): 1087 for task in range(len(self.runq_task)):
1088 bb.msg.debug(3, bb.msg.domain.RunQueue, " (%s)%s - %s: %s Deps %s RevDeps %s" % (task, 1088 bb.msg.debug(3, bb.msg.domain.RunQueue, " (%s)%s - %s: %s Deps %s RevDeps %s" % (task,
1089 taskQueue.fn_index[self.runq_fnid[task]], 1089 taskQueue.fn_index[self.runq_fnid[task]],
1090 self.runq_task[task], 1090 self.runq_task[task],
1091 self.runq_weight[task], 1091 self.runq_weight[task],
1092 self.runq_depends[task], 1092 self.runq_depends[task],
1093 self.runq_revdeps[task])) 1093 self.runq_revdeps[task]))
1094 1094
1095 bb.msg.debug(3, bb.msg.domain.RunQueue, "sorted_tasks:") 1095 bb.msg.debug(3, bb.msg.domain.RunQueue, "sorted_tasks:")
1096 for task1 in range(len(self.runq_task)): 1096 for task1 in range(len(self.runq_task)):
1097 if task1 in self.prio_map: 1097 if task1 in self.prio_map:
1098 task = self.prio_map[task1] 1098 task = self.prio_map[task1]
1099 bb.msg.debug(3, bb.msg.domain.RunQueue, " (%s)%s - %s: %s Deps %s RevDeps %s" % (task, 1099 bb.msg.debug(3, bb.msg.domain.RunQueue, " (%s)%s - %s: %s Deps %s RevDeps %s" % (task,
1100 taskQueue.fn_index[self.runq_fnid[task]], 1100 taskQueue.fn_index[self.runq_fnid[task]],
1101 self.runq_task[task], 1101 self.runq_task[task],
1102 self.runq_weight[task], 1102 self.runq_weight[task],
1103 self.runq_depends[task], 1103 self.runq_depends[task],
1104 self.runq_revdeps[task])) 1104 self.runq_revdeps[task]))
1105 1105
1106 1106
1107class TaskFailure(Exception): 1107class TaskFailure(Exception):
1108 """ 1108 """
1109 Exception raised when a task in a runqueue fails 1109 Exception raised when a task in a runqueue fails
1110 """ 1110 """
1111 def __init__(self, x): 1111 def __init__(self, x):
1112 self.args = x 1112 self.args = x
1113 1113
1114 1114
@@ -1196,4 +1196,3 @@ class runQueuePipe():
1196 if len(self.queue) > 0: 1196 if len(self.queue) > 0:
1197 print "Warning, worker left partial message" 1197 print "Warning, worker left partial message"
1198 os.close(self.fd) 1198 os.close(self.fd)
1199
diff --git a/bitbake/lib/bb/server/none.py b/bitbake/lib/bb/server/none.py
index ebda111582..d4b7fdeea6 100644
--- a/bitbake/lib/bb/server/none.py
+++ b/bitbake/lib/bb/server/none.py
@@ -178,4 +178,3 @@ class BitBakeServerConnection():
178 self.connection.terminateServer() 178 self.connection.terminateServer()
179 except: 179 except:
180 pass 180 pass
181
diff --git a/bitbake/lib/bb/server/xmlrpc.py b/bitbake/lib/bb/server/xmlrpc.py
index 3364918c77..e1e514fc9a 100644
--- a/bitbake/lib/bb/server/xmlrpc.py
+++ b/bitbake/lib/bb/server/xmlrpc.py
@@ -89,8 +89,8 @@ class BitBakeServer(SimpleXMLRPCServer):
89 89
90 def __init__(self, cooker, interface = ("localhost", 0)): 90 def __init__(self, cooker, interface = ("localhost", 0)):
91 """ 91 """
92 Constructor 92 Constructor
93 """ 93 """
94 SimpleXMLRPCServer.__init__(self, interface, 94 SimpleXMLRPCServer.__init__(self, interface,
95 requestHandler=SimpleXMLRPCRequestHandler, 95 requestHandler=SimpleXMLRPCRequestHandler,
96 logRequests=False, allow_none=True) 96 logRequests=False, allow_none=True)
@@ -146,7 +146,7 @@ class BitBakeServer(SimpleXMLRPCServer):
146 traceback.print_exc() 146 traceback.print_exc()
147 pass 147 pass
148 if nextsleep is None and len(self._idlefuns) > 0: 148 if nextsleep is None and len(self._idlefuns) > 0:
149 nextsleep = 0 149 nextsleep = 0
150 self.timeout = nextsleep 150 self.timeout = nextsleep
151 # Tell idle functions we're exiting 151 # Tell idle functions we're exiting
152 for function, data in self._idlefuns.items(): 152 for function, data in self._idlefuns.items():
@@ -175,7 +175,7 @@ class BitBakeServerConnection():
175 def terminate(self): 175 def terminate(self):
176 # Don't wait for server indefinitely 176 # Don't wait for server indefinitely
177 import socket 177 import socket
178 socket.setdefaulttimeout(2) 178 socket.setdefaulttimeout(2)
179 try: 179 try:
180 self.events.system_quit() 180 self.events.system_quit()
181 except: 181 except:
@@ -184,4 +184,3 @@ class BitBakeServerConnection():
184 self.connection.terminateServer() 184 self.connection.terminateServer()
185 except: 185 except:
186 pass 186 pass
187
diff --git a/bitbake/lib/bb/shell.py b/bitbake/lib/bb/shell.py
index 7abea0f126..512bcbf07a 100644
--- a/bitbake/lib/bb/shell.py
+++ b/bitbake/lib/bb/shell.py
@@ -168,7 +168,7 @@ class BitBakeShellCommands:
168 tasks.append([name, "do_%s" % cmd]) 168 tasks.append([name, "do_%s" % cmd])
169 169
170 td.add_unresolved(localdata, cooker.status) 170 td.add_unresolved(localdata, cooker.status)
171 171
172 rq = runqueue.RunQueue(cooker, localdata, cooker.status, td, tasks) 172 rq = runqueue.RunQueue(cooker, localdata, cooker.status, td, tasks)
173 rq.prepare_runqueue() 173 rq.prepare_runqueue()
174 rq.execute_runqueue() 174 rq.execute_runqueue()
@@ -295,7 +295,7 @@ class BitBakeShellCommands:
295 """Show a comprehensive list of commands and their purpose""" 295 """Show a comprehensive list of commands and their purpose"""
296 print "="*30, "Available Commands", "="*30 296 print "="*30, "Available Commands", "="*30
297 for cmd in sorted(cmds): 297 for cmd in sorted(cmds):
298 function,numparams,usage,helptext = cmds[cmd] 298 function, numparams, usage, helptext = cmds[cmd]
299 print "| %s | %s" % (usage.ljust(30), helptext) 299 print "| %s | %s" % (usage.ljust(30), helptext)
300 print "="*78 300 print "="*78
301 301
@@ -343,7 +343,7 @@ class BitBakeShellCommands:
343 return False 343 return False
344 print "SHELL: Creating '%s/%s'" % ( fulldirname, filename ) 344 print "SHELL: Creating '%s/%s'" % ( fulldirname, filename )
345 newpackage = open( "%s/%s" % ( fulldirname, filename ), "w" ) 345 newpackage = open( "%s/%s" % ( fulldirname, filename ), "w" )
346 print >>newpackage,"""DESCRIPTION = "" 346 print >>newpackage, """DESCRIPTION = ""
347SECTION = "" 347SECTION = ""
348AUTHOR = "" 348AUTHOR = ""
349HOMEPAGE = "" 349HOMEPAGE = ""
@@ -583,7 +583,7 @@ def sendToPastebin( desc, content ):
583 mydata["nick"] = "%s@%s" % ( os.environ.get( "USER", "unknown" ), socket.gethostname() or "unknown" ) 583 mydata["nick"] = "%s@%s" % ( os.environ.get( "USER", "unknown" ), socket.gethostname() or "unknown" )
584 mydata["text"] = content 584 mydata["text"] = content
585 params = urllib.urlencode( mydata ) 585 params = urllib.urlencode( mydata )
586 headers = {"Content-type": "application/x-www-form-urlencoded","Accept": "text/plain"} 586 headers = {"Content-type": "application/x-www-form-urlencoded", "Accept": "text/plain"}
587 587
588 host = "rafb.net" 588 host = "rafb.net"
589 conn = httplib.HTTPConnection( "%s:80" % host ) 589 conn = httplib.HTTPConnection( "%s:80" % host )
diff --git a/bitbake/lib/bb/taskdata.py b/bitbake/lib/bb/taskdata.py
index 3e5e006f5f..58e0d9d8f2 100644
--- a/bitbake/lib/bb/taskdata.py
+++ b/bitbake/lib/bb/taskdata.py
@@ -84,7 +84,7 @@ class TaskData:
84 84
85 def getrun_id(self, name): 85 def getrun_id(self, name):
86 """ 86 """
87 Return an ID number for the run target name. 87 Return an ID number for the run target name.
88 If it doesn't exist, create one. 88 If it doesn't exist, create one.
89 """ 89 """
90 if not name in self.run_names_index: 90 if not name in self.run_names_index:
@@ -95,7 +95,7 @@ class TaskData:
95 95
96 def getfn_id(self, name): 96 def getfn_id(self, name):
97 """ 97 """
98 Return an ID number for the filename. 98 Return an ID number for the filename.
99 If it doesn't exist, create one. 99 If it doesn't exist, create one.
100 """ 100 """
101 if not name in self.fn_index: 101 if not name in self.fn_index:
@@ -271,7 +271,7 @@ class TaskData:
271 271
272 def get_unresolved_build_targets(self, dataCache): 272 def get_unresolved_build_targets(self, dataCache):
273 """ 273 """
274 Return a list of build targets who's providers 274 Return a list of build targets who's providers
275 are unknown. 275 are unknown.
276 """ 276 """
277 unresolved = [] 277 unresolved = []
@@ -286,7 +286,7 @@ class TaskData:
286 286
287 def get_unresolved_run_targets(self, dataCache): 287 def get_unresolved_run_targets(self, dataCache):
288 """ 288 """
289 Return a list of runtime targets who's providers 289 Return a list of runtime targets who's providers
290 are unknown. 290 are unknown.
291 """ 291 """
292 unresolved = [] 292 unresolved = []
@@ -304,7 +304,7 @@ class TaskData:
304 Return a list of providers of item 304 Return a list of providers of item
305 """ 305 """
306 targetid = self.getbuild_id(item) 306 targetid = self.getbuild_id(item)
307 307
308 return self.build_targets[targetid] 308 return self.build_targets[targetid]
309 309
310 def get_dependees(self, itemid): 310 def get_dependees(self, itemid):
@@ -367,7 +367,7 @@ class TaskData:
367 def add_provider_internal(self, cfgData, dataCache, item): 367 def add_provider_internal(self, cfgData, dataCache, item):
368 """ 368 """
369 Add the providers of item to the task data 369 Add the providers of item to the task data
370 Mark entries were specifically added externally as against dependencies 370 Mark entries were specifically added externally as against dependencies
371 added internally during dependency resolution 371 added internally during dependency resolution
372 """ 372 """
373 373
@@ -450,7 +450,7 @@ class TaskData:
450 providers_list.append(dataCache.pkg_fn[fn]) 450 providers_list.append(dataCache.pkg_fn[fn])
451 bb.msg.note(2, bb.msg.domain.Provider, "multiple providers are available for runtime %s (%s);" % (item, ", ".join(providers_list))) 451 bb.msg.note(2, bb.msg.domain.Provider, "multiple providers are available for runtime %s (%s);" % (item, ", ".join(providers_list)))
452 bb.msg.note(2, bb.msg.domain.Provider, "consider defining a PREFERRED_PROVIDER entry to match runtime %s" % item) 452 bb.msg.note(2, bb.msg.domain.Provider, "consider defining a PREFERRED_PROVIDER entry to match runtime %s" % item)
453 bb.event.fire(bb.event.MultipleProviders(item,providers_list, runtime=True), cfgData) 453 bb.event.fire(bb.event.MultipleProviders(item, providers_list, runtime=True), cfgData)
454 self.consider_msgs_cache.append(item) 454 self.consider_msgs_cache.append(item)
455 455
456 if numberPreferred > 1: 456 if numberPreferred > 1:
@@ -460,7 +460,7 @@ class TaskData:
460 providers_list.append(dataCache.pkg_fn[fn]) 460 providers_list.append(dataCache.pkg_fn[fn])
461 bb.msg.note(2, bb.msg.domain.Provider, "multiple providers are available for runtime %s (top %s entries preferred) (%s);" % (item, numberPreferred, ", ".join(providers_list))) 461 bb.msg.note(2, bb.msg.domain.Provider, "multiple providers are available for runtime %s (top %s entries preferred) (%s);" % (item, numberPreferred, ", ".join(providers_list)))
462 bb.msg.note(2, bb.msg.domain.Provider, "consider defining only one PREFERRED_PROVIDER entry to match runtime %s" % item) 462 bb.msg.note(2, bb.msg.domain.Provider, "consider defining only one PREFERRED_PROVIDER entry to match runtime %s" % item)
463 bb.event.fire(bb.event.MultipleProviders(item,providers_list, runtime=True), cfgData) 463 bb.event.fire(bb.event.MultipleProviders(item, providers_list, runtime=True), cfgData)
464 self.consider_msgs_cache.append(item) 464 self.consider_msgs_cache.append(item)
465 465
466 # run through the list until we find one that we can build 466 # run through the list until we find one that we can build
@@ -594,9 +594,9 @@ class TaskData:
594 bb.msg.debug(3, bb.msg.domain.TaskData, "tasks:") 594 bb.msg.debug(3, bb.msg.domain.TaskData, "tasks:")
595 for task in range(len(self.tasks_name)): 595 for task in range(len(self.tasks_name)):
596 bb.msg.debug(3, bb.msg.domain.TaskData, " (%s)%s - %s: %s" % ( 596 bb.msg.debug(3, bb.msg.domain.TaskData, " (%s)%s - %s: %s" % (
597 task, 597 task,
598 self.fn_index[self.tasks_fnid[task]], 598 self.fn_index[self.tasks_fnid[task]],
599 self.tasks_name[task], 599 self.tasks_name[task],
600 self.tasks_tdepends[task])) 600 self.tasks_tdepends[task]))
601 601
602 bb.msg.debug(3, bb.msg.domain.TaskData, "dependency ids (per fn):") 602 bb.msg.debug(3, bb.msg.domain.TaskData, "dependency ids (per fn):")
@@ -606,5 +606,3 @@ class TaskData:
606 bb.msg.debug(3, bb.msg.domain.TaskData, "runtime dependency ids (per fn):") 606 bb.msg.debug(3, bb.msg.domain.TaskData, "runtime dependency ids (per fn):")
607 for fnid in self.rdepids: 607 for fnid in self.rdepids:
608 bb.msg.debug(3, bb.msg.domain.TaskData, " %s %s: %s" % (fnid, self.fn_index[fnid], self.rdepids[fnid])) 608 bb.msg.debug(3, bb.msg.domain.TaskData, " %s %s: %s" % (fnid, self.fn_index[fnid], self.rdepids[fnid]))
609
610
diff --git a/bitbake/lib/bb/ui/__init__.py b/bitbake/lib/bb/ui/__init__.py
index c6a377a8e6..a4805ed028 100644
--- a/bitbake/lib/bb/ui/__init__.py
+++ b/bitbake/lib/bb/ui/__init__.py
@@ -15,4 +15,3 @@
15# You should have received a copy of the GNU General Public License along 15# You should have received a copy of the GNU General Public License along
16# with this program; if not, write to the Free Software Foundation, Inc., 16# with this program; if not, write to the Free Software Foundation, Inc.,
17# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 17# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18
diff --git a/bitbake/lib/bb/ui/crumbs/__init__.py b/bitbake/lib/bb/ui/crumbs/__init__.py
index c6a377a8e6..a4805ed028 100644
--- a/bitbake/lib/bb/ui/crumbs/__init__.py
+++ b/bitbake/lib/bb/ui/crumbs/__init__.py
@@ -15,4 +15,3 @@
15# You should have received a copy of the GNU General Public License along 15# You should have received a copy of the GNU General Public License along
16# with this program; if not, write to the Free Software Foundation, Inc., 16# with this program; if not, write to the Free Software Foundation, Inc.,
17# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 17# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
18
diff --git a/bitbake/lib/bb/ui/crumbs/buildmanager.py b/bitbake/lib/bb/ui/crumbs/buildmanager.py
index f89e8eefd4..f5a15329d5 100644
--- a/bitbake/lib/bb/ui/crumbs/buildmanager.py
+++ b/bitbake/lib/bb/ui/crumbs/buildmanager.py
@@ -28,7 +28,7 @@ import time
28class BuildConfiguration: 28class BuildConfiguration:
29 """ Represents a potential *or* historic *or* concrete build. It 29 """ Represents a potential *or* historic *or* concrete build. It
30 encompasses all the things that we need to tell bitbake to do to make it 30 encompasses all the things that we need to tell bitbake to do to make it
31 build what we want it to build. 31 build what we want it to build.
32 32
33 It also stored the metadata URL and the set of possible machines (and the 33 It also stored the metadata URL and the set of possible machines (and the
34 distros / images / uris for these. Apart from the metdata URL these are 34 distros / images / uris for these. Apart from the metdata URL these are
@@ -73,8 +73,8 @@ class BuildConfiguration:
73 return self.urls 73 return self.urls
74 74
75 # It might be a lot lot better if we stored these in like, bitbake conf 75 # It might be a lot lot better if we stored these in like, bitbake conf
76 # file format. 76 # file format.
77 @staticmethod 77 @staticmethod
78 def load_from_file (filename): 78 def load_from_file (filename):
79 f = open (filename, "r") 79 f = open (filename, "r")
80 80
@@ -140,13 +140,13 @@ class BuildResult(gobject.GObject):
140 ".conf" in the directory for the build. 140 ".conf" in the directory for the build.
141 141
142 This is GObject so that it can be included in the TreeStore.""" 142 This is GObject so that it can be included in the TreeStore."""
143 143
144 (STATE_COMPLETE, STATE_FAILED, STATE_ONGOING) = \ 144 (STATE_COMPLETE, STATE_FAILED, STATE_ONGOING) = \
145 (0, 1, 2) 145 (0, 1, 2)
146 146
147 def __init__ (self, parent, identifier): 147 def __init__ (self, parent, identifier):
148 gobject.GObject.__init__ (self) 148 gobject.GObject.__init__ (self)
149 self.date = None 149 self.date = None
150 150
151 self.files = [] 151 self.files = []
152 self.status = None 152 self.status = None
@@ -181,7 +181,7 @@ class BuildResult(gobject.GObject):
181 self.add_file (file) 181 self.add_file (file)
182 182
183 def add_file (self, file): 183 def add_file (self, file):
184 # Just add the file for now. Don't care about the type. 184 # Just add the file for now. Don't care about the type.
185 self.files += [(file, None)] 185 self.files += [(file, None)]
186 186
187class BuildManagerModel (gtk.TreeStore): 187class BuildManagerModel (gtk.TreeStore):
@@ -194,7 +194,7 @@ class BuildManagerModel (gtk.TreeStore):
194 194
195 def __init__ (self): 195 def __init__ (self):
196 gtk.TreeStore.__init__ (self, 196 gtk.TreeStore.__init__ (self,
197 gobject.TYPE_STRING, 197 gobject.TYPE_STRING,
198 gobject.TYPE_STRING, 198 gobject.TYPE_STRING,
199 gobject.TYPE_STRING, 199 gobject.TYPE_STRING,
200 gobject.TYPE_STRING, 200 gobject.TYPE_STRING,
@@ -207,7 +207,7 @@ class BuildManager (gobject.GObject):
207 "results" directory but is also used for starting a new build.""" 207 "results" directory but is also used for starting a new build."""
208 208
209 __gsignals__ = { 209 __gsignals__ = {
210 'population-finished' : (gobject.SIGNAL_RUN_LAST, 210 'population-finished' : (gobject.SIGNAL_RUN_LAST,
211 gobject.TYPE_NONE, 211 gobject.TYPE_NONE,
212 ()), 212 ()),
213 'populate-error' : (gobject.SIGNAL_RUN_LAST, 213 'populate-error' : (gobject.SIGNAL_RUN_LAST,
@@ -220,13 +220,13 @@ class BuildManager (gobject.GObject):
220 date = long (time.mktime (result.date.timetuple())) 220 date = long (time.mktime (result.date.timetuple()))
221 221
222 # Add a top level entry for the build 222 # Add a top level entry for the build
223 223
224 self.model.set (iter, 224 self.model.set (iter,
225 BuildManagerModel.COL_IDENT, result.identifier, 225 BuildManagerModel.COL_IDENT, result.identifier,
226 BuildManagerModel.COL_DESC, result.conf.image, 226 BuildManagerModel.COL_DESC, result.conf.image,
227 BuildManagerModel.COL_MACHINE, result.conf.machine, 227 BuildManagerModel.COL_MACHINE, result.conf.machine,
228 BuildManagerModel.COL_DISTRO, result.conf.distro, 228 BuildManagerModel.COL_DISTRO, result.conf.distro,
229 BuildManagerModel.COL_BUILD_RESULT, result, 229 BuildManagerModel.COL_BUILD_RESULT, result,
230 BuildManagerModel.COL_DATE, date, 230 BuildManagerModel.COL_DATE, date,
231 BuildManagerModel.COL_STATE, result.state) 231 BuildManagerModel.COL_STATE, result.state)
232 232
@@ -257,7 +257,7 @@ class BuildManager (gobject.GObject):
257 257
258 while (iter): 258 while (iter):
259 (ident, state) = self.model.get(iter, 259 (ident, state) = self.model.get(iter,
260 BuildManagerModel.COL_IDENT, 260 BuildManagerModel.COL_IDENT,
261 BuildManagerModel.COL_STATE) 261 BuildManagerModel.COL_STATE)
262 262
263 if state == BuildResult.STATE_ONGOING: 263 if state == BuildResult.STATE_ONGOING:
@@ -422,29 +422,29 @@ class BuildManagerTreeView (gtk.TreeView):
422 422
423 # Misc descriptiony thing 423 # Misc descriptiony thing
424 renderer = gtk.CellRendererText () 424 renderer = gtk.CellRendererText ()
425 col = gtk.TreeViewColumn (None, renderer, 425 col = gtk.TreeViewColumn (None, renderer,
426 text=BuildManagerModel.COL_DESC) 426 text=BuildManagerModel.COL_DESC)
427 self.append_column (col) 427 self.append_column (col)
428 428
429 # Machine 429 # Machine
430 renderer = gtk.CellRendererText () 430 renderer = gtk.CellRendererText ()
431 col = gtk.TreeViewColumn ("Machine", renderer, 431 col = gtk.TreeViewColumn ("Machine", renderer,
432 text=BuildManagerModel.COL_MACHINE) 432 text=BuildManagerModel.COL_MACHINE)
433 self.append_column (col) 433 self.append_column (col)
434 434
435 # distro 435 # distro
436 renderer = gtk.CellRendererText () 436 renderer = gtk.CellRendererText ()
437 col = gtk.TreeViewColumn ("Distribution", renderer, 437 col = gtk.TreeViewColumn ("Distribution", renderer,
438 text=BuildManagerModel.COL_DISTRO) 438 text=BuildManagerModel.COL_DISTRO)
439 self.append_column (col) 439 self.append_column (col)
440 440
441 # date (using a custom function for formatting the cell contents it 441 # date (using a custom function for formatting the cell contents it
442 # takes epoch -> human readable string) 442 # takes epoch -> human readable string)
443 renderer = gtk.CellRendererText () 443 renderer = gtk.CellRendererText ()
444 col = gtk.TreeViewColumn ("Date", renderer, 444 col = gtk.TreeViewColumn ("Date", renderer,
445 text=BuildManagerModel.COL_DATE) 445 text=BuildManagerModel.COL_DATE)
446 self.append_column (col) 446 self.append_column (col)
447 col.set_cell_data_func (renderer, 447 col.set_cell_data_func (renderer,
448 self.date_format_custom_cell_data_func) 448 self.date_format_custom_cell_data_func)
449 449
450 # For status. 450 # For status.
@@ -454,4 +454,3 @@ class BuildManagerTreeView (gtk.TreeView):
454 self.append_column (col) 454 self.append_column (col)
455 col.set_cell_data_func (renderer, 455 col.set_cell_data_func (renderer,
456 self.state_format_custom_cell_data_fun) 456 self.state_format_custom_cell_data_fun)
457
diff --git a/bitbake/lib/bb/ui/crumbs/runningbuild.py b/bitbake/lib/bb/ui/crumbs/runningbuild.py
index 18afd6674d..79e2c9060d 100644
--- a/bitbake/lib/bb/ui/crumbs/runningbuild.py
+++ b/bitbake/lib/bb/ui/crumbs/runningbuild.py
@@ -24,7 +24,7 @@ import gobject
24class RunningBuildModel (gtk.TreeStore): 24class RunningBuildModel (gtk.TreeStore):
25 (COL_TYPE, COL_PACKAGE, COL_TASK, COL_MESSAGE, COL_ICON, COL_ACTIVE) = (0, 1, 2, 3, 4, 5) 25 (COL_TYPE, COL_PACKAGE, COL_TASK, COL_MESSAGE, COL_ICON, COL_ACTIVE) = (0, 1, 2, 3, 4, 5)
26 def __init__ (self): 26 def __init__ (self):
27 gtk.TreeStore.__init__ (self, 27 gtk.TreeStore.__init__ (self,
28 gobject.TYPE_STRING, 28 gobject.TYPE_STRING,
29 gobject.TYPE_STRING, 29 gobject.TYPE_STRING,
30 gobject.TYPE_STRING, 30 gobject.TYPE_STRING,
@@ -34,7 +34,7 @@ class RunningBuildModel (gtk.TreeStore):
34 34
35class RunningBuild (gobject.GObject): 35class RunningBuild (gobject.GObject):
36 __gsignals__ = { 36 __gsignals__ = {
37 'build-succeeded' : (gobject.SIGNAL_RUN_LAST, 37 'build-succeeded' : (gobject.SIGNAL_RUN_LAST,
38 gobject.TYPE_NONE, 38 gobject.TYPE_NONE,
39 ()), 39 ()),
40 'build-failed' : (gobject.SIGNAL_RUN_LAST, 40 'build-failed' : (gobject.SIGNAL_RUN_LAST,
@@ -82,12 +82,12 @@ class RunningBuild (gobject.GObject):
82 82
83 # Add the message to the tree either at the top level if parent is 83 # Add the message to the tree either at the top level if parent is
84 # None otherwise as a descendent of a task. 84 # None otherwise as a descendent of a task.
85 self.model.append (parent, 85 self.model.append (parent,
86 (event.__name__.split()[-1], # e.g. MsgWarn, MsgError 86 (event.__name__.split()[-1], # e.g. MsgWarn, MsgError
87 package, 87 package,
88 task, 88 task,
89 event._message, 89 event._message,
90 icon, 90 icon,
91 False)) 91 False))
92 elif isinstance(event, bb.build.TaskStarted): 92 elif isinstance(event, bb.build.TaskStarted):
93 (package, task) = (event._package, event._task) 93 (package, task) = (event._package, event._task)
@@ -101,10 +101,10 @@ class RunningBuild (gobject.GObject):
101 if (self.tasks_to_iter.has_key ((package, None))): 101 if (self.tasks_to_iter.has_key ((package, None))):
102 parent = self.tasks_to_iter[(package, None)] 102 parent = self.tasks_to_iter[(package, None)]
103 else: 103 else:
104 parent = self.model.append (None, (None, 104 parent = self.model.append (None, (None,
105 package, 105 package,
106 None, 106 None,
107 "Package: %s" % (package), 107 "Package: %s" % (package),
108 None, 108 None,
109 False)) 109 False))
110 self.tasks_to_iter[(package, None)] = parent 110 self.tasks_to_iter[(package, None)] = parent
@@ -114,10 +114,10 @@ class RunningBuild (gobject.GObject):
114 self.model.set(parent, self.model.COL_ICON, "gtk-execute") 114 self.model.set(parent, self.model.COL_ICON, "gtk-execute")
115 115
116 # Add an entry in the model for this task 116 # Add an entry in the model for this task
117 i = self.model.append (parent, (None, 117 i = self.model.append (parent, (None,
118 package, 118 package,
119 task, 119 task,
120 "Task: %s" % (task), 120 "Task: %s" % (task),
121 None, 121 None,
122 False)) 122 False))
123 123
@@ -176,5 +176,3 @@ class RunningBuildTreeView (gtk.TreeView):
176 renderer = gtk.CellRendererText () 176 renderer = gtk.CellRendererText ()
177 col = gtk.TreeViewColumn ("Message", renderer, text=3) 177 col = gtk.TreeViewColumn ("Message", renderer, text=3)
178 self.append_column (col) 178 self.append_column (col)
179
180
diff --git a/bitbake/lib/bb/ui/depexp.py b/bitbake/lib/bb/ui/depexp.py
index cfa5b6564e..c596cad5cf 100644
--- a/bitbake/lib/bb/ui/depexp.py
+++ b/bitbake/lib/bb/ui/depexp.py
@@ -233,7 +233,7 @@ def init(server, eventHandler):
233 x = event.sofar 233 x = event.sofar
234 y = event.total 234 y = event.total
235 if x == y: 235 if x == y:
236 print("\nParsing finished. %d cached, %d parsed, %d skipped, %d masked, %d errors." 236 print("\nParsing finished. %d cached, %d parsed, %d skipped, %d masked, %d errors."
237 % ( event.cached, event.parsed, event.skipped, event.masked, event.errors)) 237 % ( event.cached, event.parsed, event.skipped, event.masked, event.errors))
238 pbar.hide() 238 pbar.hide()
239 gtk.gdk.threads_enter() 239 gtk.gdk.threads_enter()
@@ -269,4 +269,3 @@ def init(server, eventHandler):
269 server.runCommand(["stateShutdown"]) 269 server.runCommand(["stateShutdown"])
270 shutdown = shutdown + 1 270 shutdown = shutdown + 1
271 pass 271 pass
272
diff --git a/bitbake/lib/bb/ui/goggle.py b/bitbake/lib/bb/ui/goggle.py
index 94995d82db..bcba38be9c 100644
--- a/bitbake/lib/bb/ui/goggle.py
+++ b/bitbake/lib/bb/ui/goggle.py
@@ -25,13 +25,13 @@ from bb.ui.crumbs.runningbuild import RunningBuildTreeView, RunningBuild
25 25
26def event_handle_idle_func (eventHandler, build): 26def event_handle_idle_func (eventHandler, build):
27 27
28 # Consume as many messages as we can in the time available to us 28 # Consume as many messages as we can in the time available to us
29 event = eventHandler.getEvent() 29 event = eventHandler.getEvent()
30 while event: 30 while event:
31 build.handle_event (event) 31 build.handle_event (event)
32 event = eventHandler.getEvent() 32 event = eventHandler.getEvent()
33 33
34 return True 34 return True
35 35
36class MainWindow (gtk.Window): 36class MainWindow (gtk.Window):
37 def __init__ (self): 37 def __init__ (self):
@@ -74,4 +74,3 @@ def init (server, eventHandler):
74 running_build) 74 running_build)
75 75
76 gtk.main() 76 gtk.main()
77
diff --git a/bitbake/lib/bb/ui/knotty.py b/bitbake/lib/bb/ui/knotty.py
index ed26bb2b4c..3261792dfc 100644
--- a/bitbake/lib/bb/ui/knotty.py
+++ b/bitbake/lib/bb/ui/knotty.py
@@ -132,7 +132,7 @@ def init(server, eventHandler):
132 sys.stdout.write("done.") 132 sys.stdout.write("done.")
133 sys.stdout.flush() 133 sys.stdout.flush()
134 if x == y: 134 if x == y:
135 print("\nParsing of %d .bb files complete (%d cached, %d parsed). %d targets, %d skipped, %d masked, %d errors." 135 print("\nParsing of %d .bb files complete (%d cached, %d parsed). %d targets, %d skipped, %d masked, %d errors."
136 % ( event.total, event.cached, event.parsed, event.virtuals, event.skipped, event.masked, event.errors)) 136 % ( event.total, event.cached, event.parsed, event.virtuals, event.skipped, event.masked, event.errors))
137 continue 137 continue
138 138
diff --git a/bitbake/lib/bb/ui/ncurses.py b/bitbake/lib/bb/ui/ncurses.py
index da3690e5ca..0eb1cf013b 100644
--- a/bitbake/lib/bb/ui/ncurses.py
+++ b/bitbake/lib/bb/ui/ncurses.py
@@ -136,7 +136,7 @@ class NCursesUI:
136 """Thread Activity Window""" 136 """Thread Activity Window"""
137 def __init__( self, x, y, width, height ): 137 def __init__( self, x, y, width, height ):
138 NCursesUI.DecoratedWindow.__init__( self, "Thread Activity", x, y, width, height ) 138 NCursesUI.DecoratedWindow.__init__( self, "Thread Activity", x, y, width, height )
139 139
140 def setStatus( self, thread, text ): 140 def setStatus( self, thread, text ):
141 line = "%02d: %s" % ( thread, text ) 141 line = "%02d: %s" % ( thread, text )
142 width = self.dimensions[WIDTH] 142 width = self.dimensions[WIDTH]
@@ -225,7 +225,7 @@ class NCursesUI:
225 225
226 helper = uihelper.BBUIHelper() 226 helper = uihelper.BBUIHelper()
227 shutdown = 0 227 shutdown = 0
228 228
229 try: 229 try:
230 cmdline = server.runCommand(["getCmdLineAction"]) 230 cmdline = server.runCommand(["getCmdLineAction"])
231 if not cmdline: 231 if not cmdline:
@@ -263,7 +263,7 @@ class NCursesUI:
263 y = event.total 263 y = event.total
264 if x == y: 264 if x == y:
265 mw.setStatus("Idle") 265 mw.setStatus("Idle")
266 mw.appendText("Parsing finished. %d cached, %d parsed, %d skipped, %d masked." 266 mw.appendText("Parsing finished. %d cached, %d parsed, %d skipped, %d masked."
267 % ( event.cached, event.parsed, event.skipped, event.masked )) 267 % ( event.cached, event.parsed, event.skipped, event.masked ))
268 else: 268 else:
269 mw.setStatus("Parsing: %s (%04d/%04d) [%2d %%]" % ( parsespin.next(), x, y, x*100/y ) ) 269 mw.setStatus("Parsing: %s (%04d/%04d) [%2d %%]" % ( parsespin.next(), x, y, x*100/y ) )
@@ -332,4 +332,3 @@ def init(server, eventHandler):
332 except: 332 except:
333 import traceback 333 import traceback
334 traceback.print_exc() 334 traceback.print_exc()
335
diff --git a/bitbake/lib/bb/ui/puccho.py b/bitbake/lib/bb/ui/puccho.py
index 713aa1f4a6..dfcb0f7651 100644
--- a/bitbake/lib/bb/ui/puccho.py
+++ b/bitbake/lib/bb/ui/puccho.py
@@ -38,7 +38,7 @@ class MetaDataLoader(gobject.GObject):
38 on what machines are available. The distribution and images available for 38 on what machines are available. The distribution and images available for
39 the machine and the the uris to use for building the given machine.""" 39 the machine and the the uris to use for building the given machine."""
40 __gsignals__ = { 40 __gsignals__ = {
41 'success' : (gobject.SIGNAL_RUN_LAST, 41 'success' : (gobject.SIGNAL_RUN_LAST,
42 gobject.TYPE_NONE, 42 gobject.TYPE_NONE,
43 ()), 43 ()),
44 'error' : (gobject.SIGNAL_RUN_LAST, 44 'error' : (gobject.SIGNAL_RUN_LAST,
@@ -293,7 +293,7 @@ class BuildSetupDialog (gtk.Dialog):
293 if (active_iter): 293 if (active_iter):
294 self.configuration.machine = model.get(active_iter, 0)[0] 294 self.configuration.machine = model.get(active_iter, 0)[0]
295 295
296 # Extract the chosen distro from the combo 296 # Extract the chosen distro from the combo
297 model = self.distribution_combo.get_model() 297 model = self.distribution_combo.get_model()
298 active_iter = self.distribution_combo.get_active_iter() 298 active_iter = self.distribution_combo.get_active_iter()
299 if (active_iter): 299 if (active_iter):
@@ -311,62 +311,62 @@ class BuildSetupDialog (gtk.Dialog):
311# 311#
312# TODO: Should be a method on the RunningBuild class 312# TODO: Should be a method on the RunningBuild class
313def event_handle_timeout (eventHandler, build): 313def event_handle_timeout (eventHandler, build):
314 # Consume as many messages as we can ... 314 # Consume as many messages as we can ...
315 event = eventHandler.getEvent() 315 event = eventHandler.getEvent()
316 while event: 316 while event:
317 build.handle_event (event) 317 build.handle_event (event)
318 event = eventHandler.getEvent() 318 event = eventHandler.getEvent()
319 return True 319 return True
320 320
321class MainWindow (gtk.Window): 321class MainWindow (gtk.Window):
322 322
323 # Callback that gets fired when the user hits a button in the 323 # Callback that gets fired when the user hits a button in the
324 # BuildSetupDialog. 324 # BuildSetupDialog.
325 def build_dialog_box_response_cb (self, dialog, response_id): 325 def build_dialog_box_response_cb (self, dialog, response_id):
326 conf = None 326 conf = None
327 if (response_id == BuildSetupDialog.RESPONSE_BUILD): 327 if (response_id == BuildSetupDialog.RESPONSE_BUILD):
328 dialog.update_configuration() 328 dialog.update_configuration()
329 print dialog.configuration.machine, dialog.configuration.distro, \ 329 print dialog.configuration.machine, dialog.configuration.distro, \
330 dialog.configuration.image 330 dialog.configuration.image
331 conf = dialog.configuration 331 conf = dialog.configuration
332 332
333 dialog.destroy() 333 dialog.destroy()
334 334
335 if conf: 335 if conf:
336 self.manager.do_build (conf) 336 self.manager.do_build (conf)
337 337
338 def build_button_clicked_cb (self, button): 338 def build_button_clicked_cb (self, button):
339 dialog = BuildSetupDialog () 339 dialog = BuildSetupDialog ()
340 340
341 # For some unknown reason Dialog.run causes nice little deadlocks ... :-( 341 # For some unknown reason Dialog.run causes nice little deadlocks ... :-(
342 dialog.connect ("response", self.build_dialog_box_response_cb) 342 dialog.connect ("response", self.build_dialog_box_response_cb)
343 dialog.show() 343 dialog.show()
344 344
345 def __init__ (self): 345 def __init__ (self):
346 gtk.Window.__init__ (self) 346 gtk.Window.__init__ (self)
347 347
348 # Pull in *just* the main vbox from the Glade XML data and then pack 348 # Pull in *just* the main vbox from the Glade XML data and then pack
349 # that inside the window 349 # that inside the window
350 gxml = gtk.glade.XML (os.path.dirname(__file__) + "/crumbs/puccho.glade", 350 gxml = gtk.glade.XML (os.path.dirname(__file__) + "/crumbs/puccho.glade",
351 root = "main_window_vbox") 351 root = "main_window_vbox")
352 vbox = gxml.get_widget ("main_window_vbox") 352 vbox = gxml.get_widget ("main_window_vbox")
353 self.add (vbox) 353 self.add (vbox)
354 354
355 # Create the tree views for the build manager view and the progress view 355 # Create the tree views for the build manager view and the progress view
356 self.build_manager_view = BuildManagerTreeView() 356 self.build_manager_view = BuildManagerTreeView()
357 self.running_build_view = RunningBuildTreeView() 357 self.running_build_view = RunningBuildTreeView()
358 358
359 # Grab the scrolled windows that we put the tree views into 359 # Grab the scrolled windows that we put the tree views into
360 self.results_scrolledwindow = gxml.get_widget ("results_scrolledwindow") 360 self.results_scrolledwindow = gxml.get_widget ("results_scrolledwindow")
361 self.progress_scrolledwindow = gxml.get_widget ("progress_scrolledwindow") 361 self.progress_scrolledwindow = gxml.get_widget ("progress_scrolledwindow")
362 362
363 # Put the tree views inside ... 363 # Put the tree views inside ...
364 self.results_scrolledwindow.add (self.build_manager_view) 364 self.results_scrolledwindow.add (self.build_manager_view)
365 self.progress_scrolledwindow.add (self.running_build_view) 365 self.progress_scrolledwindow.add (self.running_build_view)
366 366
367 # Hook up the build button... 367 # Hook up the build button...
368 self.build_button = gxml.get_widget ("main_toolbutton_build") 368 self.build_button = gxml.get_widget ("main_toolbutton_build")
369 self.build_button.connect ("clicked", self.build_button_clicked_cb) 369 self.build_button.connect ("clicked", self.build_button_clicked_cb)
370 370
371# I'm not very happy about the current ownership of the RunningBuild. I have 371# I'm not very happy about the current ownership of the RunningBuild. I have
372# my suspicions that this object should be held by the BuildManager since we 372# my suspicions that this object should be held by the BuildManager since we
diff --git a/bitbake/lib/bb/ui/uievent.py b/bitbake/lib/bb/ui/uievent.py
index 36302f4da7..5b3efffcba 100644
--- a/bitbake/lib/bb/ui/uievent.py
+++ b/bitbake/lib/bb/ui/uievent.py
@@ -19,7 +19,7 @@
19 19
20 20
21""" 21"""
22Use this class to fork off a thread to recieve event callbacks from the bitbake 22Use this class to fork off a thread to recieve event callbacks from the bitbake
23server and queue them for the UI to process. This process must be used to avoid 23server and queue them for the UI to process. This process must be used to avoid
24client/server deadlocks. 24client/server deadlocks.
25""" 25"""
@@ -116,10 +116,9 @@ class UIXMLRPCServer (SimpleXMLRPCServer):
116 if request is None: 116 if request is None:
117 return 117 return
118 SimpleXMLRPCServer.close_request(self, request) 118 SimpleXMLRPCServer.close_request(self, request)
119 119
120 def process_request(self, request, client_address): 120 def process_request(self, request, client_address):
121 if request is None: 121 if request is None:
122 return 122 return
123 SimpleXMLRPCServer.process_request(self, request, client_address) 123 SimpleXMLRPCServer.process_request(self, request, client_address)
124 124
125
diff --git a/bitbake/lib/bb/utils.py b/bitbake/lib/bb/utils.py
index 5015ee440a..93c158c604 100644
--- a/bitbake/lib/bb/utils.py
+++ b/bitbake/lib/bb/utils.py
@@ -19,11 +19,12 @@ BitBake Utility Functions
19# with this program; if not, write to the Free Software Foundation, Inc., 19# with this program; if not, write to the Free Software Foundation, Inc.,
20# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. 20# 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA.
21 21
22separators = ".-"
23
24import re, fcntl, os, types, bb, string, stat, shutil, time 22import re, fcntl, os, types, bb, string, stat, shutil, time
25from commands import getstatusoutput 23from commands import getstatusoutput
26 24
25# Version comparison
26separators = ".-"
27
27# Context used in better_exec, eval 28# Context used in better_exec, eval
28_context = { 29_context = {
29 "os": os, 30 "os": os,
@@ -92,19 +93,19 @@ def vercmp(ta, tb):
92 r = vercmp_part(ra, rb) 93 r = vercmp_part(ra, rb)
93 return r 94 return r
94 95
95_package_weights_ = {"pre":-2,"p":0,"alpha":-4,"beta":-3,"rc":-1} # dicts are unordered 96_package_weights_ = {"pre":-2, "p":0, "alpha":-4, "beta":-3, "rc":-1} # dicts are unordered
96_package_ends_ = ["pre", "p", "alpha", "beta", "rc", "cvs", "bk", "HEAD" ] # so we need ordered list 97_package_ends_ = ["pre", "p", "alpha", "beta", "rc", "cvs", "bk", "HEAD" ] # so we need ordered list
97 98
98def relparse(myver): 99def relparse(myver):
99 """Parses the last elements of a version number into a triplet, that can 100 """Parses the last elements of a version number into a triplet, that can
100 later be compared. 101 later be compared.
101 """ 102 """
102 103
103 number = 0 104 number = 0
104 p1 = 0 105 p1 = 0
105 p2 = 0 106 p2 = 0
106 mynewver = myver.split('_') 107 mynewver = myver.split('_')
107 if len(mynewver)==2: 108 if len(mynewver) == 2:
108 # an _package_weights_ 109 # an _package_weights_
109 number = float(mynewver[0]) 110 number = float(mynewver[0])
110 match = 0 111 match = 0
@@ -132,15 +133,15 @@ def relparse(myver):
132 divider = len(myver)-1 133 divider = len(myver)-1
133 if myver[divider:] not in "1234567890": 134 if myver[divider:] not in "1234567890":
134 #letter at end 135 #letter at end
135 p1 = ord(myver[divider:]) 136 p1 = ord(myver[divider:])
136 number = float(myver[0:divider]) 137 number = float(myver[0:divider])
137 else: 138 else:
138 number = float(myver) 139 number = float(myver)
139 return [number,p1,p2] 140 return [number, p1, p2]
140 141
141__vercmp_cache__ = {} 142__vercmp_cache__ = {}
142 143
143def vercmp_string(val1,val2): 144def vercmp_string(val1, val2):
144 """This takes two version strings and returns an integer to tell you whether 145 """This takes two version strings and returns an integer to tell you whether
145 the versions are the same, val1>val2 or val2>val1. 146 the versions are the same, val1>val2 or val2>val1.
146 """ 147 """
@@ -148,13 +149,13 @@ def vercmp_string(val1,val2):
148 # quick short-circuit 149 # quick short-circuit
149 if val1 == val2: 150 if val1 == val2:
150 return 0 151 return 0
151 valkey = val1+" "+val2 152 valkey = val1 + " " + val2
152 153
153 # cache lookup 154 # cache lookup
154 try: 155 try:
155 return __vercmp_cache__[valkey] 156 return __vercmp_cache__[valkey]
156 try: 157 try:
157 return - __vercmp_cache__[val2+" "+val1] 158 return - __vercmp_cache__[val2 + " " + val1]
158 except KeyError: 159 except KeyError:
159 pass 160 pass
160 except KeyError: 161 except KeyError:
@@ -175,21 +176,21 @@ def vercmp_string(val1,val2):
175 # replace '-' by '.' 176 # replace '-' by '.'
176 # FIXME: Is it needed? can val1/2 contain '-'? 177 # FIXME: Is it needed? can val1/2 contain '-'?
177 178
178 val1 = string.split(val1,'-') 179 val1 = val1.split("-")
179 if len(val1) == 2: 180 if len(val1) == 2:
180 val1[0] = val1[0] +"."+ val1[1] 181 val1[0] = val1[0] + "." + val1[1]
181 val2 = string.split(val2,'-') 182 val2 = val2.split("-")
182 if len(val2) == 2: 183 if len(val2) == 2:
183 val2[0] = val2[0] +"."+ val2[1] 184 val2[0] = val2[0] + "." + val2[1]
184 185
185 val1 = string.split(val1[0],'.') 186 val1 = val1[0].split('.')
186 val2 = string.split(val2[0],'.') 187 val2 = val2[0].split('.')
187 188
188 # add back decimal point so that .03 does not become "3" ! 189 # add back decimal point so that .03 does not become "3" !
189 for x in range(1,len(val1)): 190 for x in range(1, len(val1)):
190 if val1[x][0] == '0' : 191 if val1[x][0] == '0' :
191 val1[x] = '.' + val1[x] 192 val1[x] = '.' + val1[x]
192 for x in range(1,len(val2)): 193 for x in range(1, len(val2)):
193 if val2[x][0] == '0' : 194 if val2[x][0] == '0' :
194 val2[x] = '.' + val2[x] 195 val2[x] = '.' + val2[x]
195 196
@@ -206,10 +207,10 @@ def vercmp_string(val1,val2):
206 val2[-1] += '_' + val2_prepart 207 val2[-1] += '_' + val2_prepart
207 # The above code will extend version numbers out so they 208 # The above code will extend version numbers out so they
208 # have the same number of digits. 209 # have the same number of digits.
209 for x in range(0,len(val1)): 210 for x in range(0, len(val1)):
210 cmp1 = relparse(val1[x]) 211 cmp1 = relparse(val1[x])
211 cmp2 = relparse(val2[x]) 212 cmp2 = relparse(val2[x])
212 for y in range(0,3): 213 for y in range(0, 3):
213 myret = cmp1[y] - cmp2[y] 214 myret = cmp1[y] - cmp2[y]
214 if myret != 0: 215 if myret != 0:
215 __vercmp_cache__[valkey] = myret 216 __vercmp_cache__[valkey] = myret
@@ -290,9 +291,9 @@ def _print_trace(body, line):
290 291
291 # print the environment of the method 292 # print the environment of the method
292 bb.msg.error(bb.msg.domain.Util, "Printing the environment of the function") 293 bb.msg.error(bb.msg.domain.Util, "Printing the environment of the function")
293 min_line = max(1,line-4) 294 min_line = max(1, line-4)
294 max_line = min(line+4,len(body)-1) 295 max_line = min(line + 4, len(body)-1)
295 for i in range(min_line,max_line+1): 296 for i in range(min_line, max_line + 1):
296 bb.msg.error(bb.msg.domain.Util, "\t%.4d:%s" % (i, body[i-1]) ) 297 bb.msg.error(bb.msg.domain.Util, "\t%.4d:%s" % (i, body[i-1]) )
297 298
298 299
@@ -304,7 +305,7 @@ def better_compile(text, file, realfile, mode = "exec"):
304 try: 305 try:
305 return compile(text, file, mode) 306 return compile(text, file, mode)
306 except Exception, e: 307 except Exception, e:
307 import bb,sys 308 import bb, sys
308 309
309 # split the text into lines again 310 # split the text into lines again
310 body = text.split('\n') 311 body = text.split('\n')
@@ -323,18 +324,18 @@ def better_exec(code, context, text, realfile):
323 print the lines that are responsible for the 324 print the lines that are responsible for the
324 error. 325 error.
325 """ 326 """
326 import bb,sys 327 import bb, sys
327 try: 328 try:
328 exec code in _context, context 329 exec code in _context, context
329 except: 330 except:
330 (t,value,tb) = sys.exc_info() 331 (t, value, tb) = sys.exc_info()
331 332
332 if t in [bb.parse.SkipPackage, bb.build.FuncFailed]: 333 if t in [bb.parse.SkipPackage, bb.build.FuncFailed]:
333 raise 334 raise
334 335
335 # print the Header of the Error Message 336 # print the Header of the Error Message
336 bb.msg.error(bb.msg.domain.Util, "Error in executing python function in: %s" % realfile) 337 bb.msg.error(bb.msg.domain.Util, "Error in executing python function in: %s" % realfile)
337 bb.msg.error(bb.msg.domain.Util, "Exception:%s Message:%s" % (t,value) ) 338 bb.msg.error(bb.msg.domain.Util, "Exception:%s Message:%s" % (t, value))
338 339
339 # let us find the line number now 340 # let us find the line number now
340 while tb.tb_next: 341 while tb.tb_next:
@@ -344,7 +345,7 @@ def better_exec(code, context, text, realfile):
344 line = traceback.tb_lineno(tb) 345 line = traceback.tb_lineno(tb)
345 346
346 _print_trace( text.split('\n'), line ) 347 _print_trace( text.split('\n'), line )
347 348
348 raise 349 raise
349 350
350def simple_exec(code, context): 351def simple_exec(code, context):
@@ -367,22 +368,22 @@ def lockfile(name):
367 while True: 368 while True:
368 # If we leave the lockfiles lying around there is no problem 369 # If we leave the lockfiles lying around there is no problem
369 # but we should clean up after ourselves. This gives potential 370 # but we should clean up after ourselves. This gives potential
370 # for races though. To work around this, when we acquire the lock 371 # for races though. To work around this, when we acquire the lock
371 # we check the file we locked was still the lock file on disk. 372 # we check the file we locked was still the lock file on disk.
372 # by comparing inode numbers. If they don't match or the lockfile 373 # by comparing inode numbers. If they don't match or the lockfile
373 # no longer exists, we start again. 374 # no longer exists, we start again.
374 375
375 # This implementation is unfair since the last person to request the 376 # This implementation is unfair since the last person to request the
376 # lock is the most likely to win it. 377 # lock is the most likely to win it.
377 378
378 try: 379 try:
379 lf = open(name, "a+") 380 lf = open(name, "a+")
380 fcntl.flock(lf.fileno(), fcntl.LOCK_EX) 381 fcntl.flock(lf.fileno(), fcntl.LOCK_EX)
381 statinfo = os.fstat(lf.fileno()) 382 statinfo = os.fstat(lf.fileno())
382 if os.path.exists(lf.name): 383 if os.path.exists(lf.name):
383 statinfo2 = os.stat(lf.name) 384 statinfo2 = os.stat(lf.name)
384 if statinfo.st_ino == statinfo2.st_ino: 385 if statinfo.st_ino == statinfo2.st_ino:
385 return lf 386 return lf
386 # File no longer exists or changed, retry 387 # File no longer exists or changed, retry
387 lf.close 388 lf.close
388 except Exception, e: 389 except Exception, e:
@@ -390,7 +391,7 @@ def lockfile(name):
390 391
391def unlockfile(lf): 392def unlockfile(lf):
392 """ 393 """
393 Unlock a file locked using lockfile() 394 Unlock a file locked using lockfile()
394 """ 395 """
395 os.unlink(lf.name) 396 os.unlink(lf.name)
396 fcntl.flock(lf.fileno(), fcntl.LOCK_UN) 397 fcntl.flock(lf.fileno(), fcntl.LOCK_UN)
@@ -406,7 +407,7 @@ def md5_file(filename):
406 except ImportError: 407 except ImportError:
407 import md5 408 import md5
408 m = md5.new() 409 m = md5.new()
409 410
410 for line in open(filename): 411 for line in open(filename):
411 m.update(line) 412 m.update(line)
412 return m.hexdigest() 413 return m.hexdigest()
@@ -472,7 +473,7 @@ def filter_environment(good_vars):
472 for key in os.environ.keys(): 473 for key in os.environ.keys():
473 if key in good_vars: 474 if key in good_vars:
474 continue 475 continue
475 476
476 removed_vars.append(key) 477 removed_vars.append(key)
477 os.unsetenv(key) 478 os.unsetenv(key)
478 del os.environ[key] 479 del os.environ[key]
@@ -517,7 +518,7 @@ def build_environment(d):
517def prunedir(topdir): 518def prunedir(topdir):
518 # Delete everything reachable from the directory named in 'topdir'. 519 # Delete everything reachable from the directory named in 'topdir'.
519 # CAUTION: This is dangerous! 520 # CAUTION: This is dangerous!
520 for root, dirs, files in os.walk(topdir, topdown=False): 521 for root, dirs, files in os.walk(topdir, topdown = False):
521 for name in files: 522 for name in files:
522 os.remove(os.path.join(root, name)) 523 os.remove(os.path.join(root, name))
523 for name in dirs: 524 for name in dirs:
@@ -532,7 +533,7 @@ def prunedir(topdir):
532# but thats possibly insane and suffixes is probably going to be small 533# but thats possibly insane and suffixes is probably going to be small
533# 534#
534def prune_suffix(var, suffixes, d): 535def prune_suffix(var, suffixes, d):
535 # See if var ends with any of the suffixes listed and 536 # See if var ends with any of the suffixes listed and
536 # remove it if found 537 # remove it if found
537 for suffix in suffixes: 538 for suffix in suffixes:
538 if var.endswith(suffix): 539 if var.endswith(suffix):
@@ -553,42 +554,42 @@ def mkdirhier(dir):
553 554
554import stat 555import stat
555 556
556def movefile(src,dest,newmtime=None,sstat=None): 557def movefile(src, dest, newmtime = None, sstat = None):
557 """Moves a file from src to dest, preserving all permissions and 558 """Moves a file from src to dest, preserving all permissions and
558 attributes; mtime will be preserved even when moving across 559 attributes; mtime will be preserved even when moving across
559 filesystems. Returns true on success and false on failure. Move is 560 filesystems. Returns true on success and false on failure. Move is
560 atomic. 561 atomic.
561 """ 562 """
562 563
563 #print "movefile("+src+","+dest+","+str(newmtime)+","+str(sstat)+")" 564 #print "movefile(" + src + "," + dest + "," + str(newmtime) + "," + str(sstat) + ")"
564 try: 565 try:
565 if not sstat: 566 if not sstat:
566 sstat=os.lstat(src) 567 sstat = os.lstat(src)
567 except Exception, e: 568 except Exception, e:
568 print "movefile: Stating source file failed...", e 569 print "movefile: Stating source file failed...", e
569 return None 570 return None
570 571
571 destexists=1 572 destexists = 1
572 try: 573 try:
573 dstat=os.lstat(dest) 574 dstat = os.lstat(dest)
574 except: 575 except:
575 dstat=os.lstat(os.path.dirname(dest)) 576 dstat = os.lstat(os.path.dirname(dest))
576 destexists=0 577 destexists = 0
577 578
578 if destexists: 579 if destexists:
579 if stat.S_ISLNK(dstat[stat.ST_MODE]): 580 if stat.S_ISLNK(dstat[stat.ST_MODE]):
580 try: 581 try:
581 os.unlink(dest) 582 os.unlink(dest)
582 destexists=0 583 destexists = 0
583 except Exception, e: 584 except Exception, e:
584 pass 585 pass
585 586
586 if stat.S_ISLNK(sstat[stat.ST_MODE]): 587 if stat.S_ISLNK(sstat[stat.ST_MODE]):
587 try: 588 try:
588 target=os.readlink(src) 589 target = os.readlink(src)
589 if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]): 590 if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
590 os.unlink(dest) 591 os.unlink(dest)
591 os.symlink(target,dest) 592 os.symlink(target, dest)
592 #os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID]) 593 #os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
593 os.unlink(src) 594 os.unlink(src)
594 return os.lstat(dest) 595 return os.lstat(dest)
@@ -596,38 +597,38 @@ def movefile(src,dest,newmtime=None,sstat=None):
596 print "movefile: failed to properly create symlink:", dest, "->", target, e 597 print "movefile: failed to properly create symlink:", dest, "->", target, e
597 return None 598 return None
598 599
599 renamefailed=1 600 renamefailed = 1
600 if sstat[stat.ST_DEV]==dstat[stat.ST_DEV]: 601 if sstat[stat.ST_DEV] == dstat[stat.ST_DEV]:
601 try: 602 try:
602 ret=os.rename(src,dest) 603 ret = os.rename(src, dest)
603 renamefailed=0 604 renamefailed = 0
604 except Exception, e: 605 except Exception, e:
605 import errno 606 import errno
606 if e[0]!=errno.EXDEV: 607 if e[0] != errno.EXDEV:
607 # Some random error. 608 # Some random error.
608 print "movefile: Failed to move", src, "to", dest, e 609 print "movefile: Failed to move", src, "to", dest, e
609 return None 610 return None
610 # Invalid cross-device-link 'bind' mounted or actually Cross-Device 611 # Invalid cross-device-link 'bind' mounted or actually Cross-Device
611 612
612 if renamefailed: 613 if renamefailed:
613 didcopy=0 614 didcopy = 0
614 if stat.S_ISREG(sstat[stat.ST_MODE]): 615 if stat.S_ISREG(sstat[stat.ST_MODE]):
615 try: # For safety copy then move it over. 616 try: # For safety copy then move it over.
616 shutil.copyfile(src,dest+"#new") 617 shutil.copyfile(src, dest + "#new")
617 os.rename(dest+"#new",dest) 618 os.rename(dest + "#new", dest)
618 didcopy=1 619 didcopy = 1
619 except Exception, e: 620 except Exception, e:
620 print 'movefile: copy', src, '->', dest, 'failed.', e 621 print 'movefile: copy', src, '->', dest, 'failed.', e
621 return None 622 return None
622 else: 623 else:
623 #we don't yet handle special, so we need to fall back to /bin/mv 624 #we don't yet handle special, so we need to fall back to /bin/mv
624 a=getstatusoutput("/bin/mv -f "+"'"+src+"' '"+dest+"'") 625 a = getstatusoutput("/bin/mv -f " + "'" + src + "' '" + dest + "'")
625 if a[0]!=0: 626 if a[0] != 0:
626 print "movefile: Failed to move special file:" + src + "' to '" + dest + "'", a 627 print "movefile: Failed to move special file:" + src + "' to '" + dest + "'", a
627 return None # failure 628 return None # failure
628 try: 629 try:
629 if didcopy: 630 if didcopy:
630 os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID]) 631 os.lchown(dest, sstat[stat.ST_UID], sstat[stat.ST_GID])
631 os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown 632 os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
632 os.unlink(src) 633 os.unlink(src)
633 except Exception, e: 634 except Exception, e:
@@ -635,47 +636,47 @@ def movefile(src,dest,newmtime=None,sstat=None):
635 return None 636 return None
636 637
637 if newmtime: 638 if newmtime:
638 os.utime(dest,(newmtime,newmtime)) 639 os.utime(dest, (newmtime, newmtime))
639 else: 640 else:
640 os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME])) 641 os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
641 newmtime=sstat[stat.ST_MTIME] 642 newmtime = sstat[stat.ST_MTIME]
642 return newmtime 643 return newmtime
643 644
644def copyfile(src,dest,newmtime=None,sstat=None): 645def copyfile(src, dest, newmtime = None, sstat = None):
645 """ 646 """
646 Copies a file from src to dest, preserving all permissions and 647 Copies a file from src to dest, preserving all permissions and
647 attributes; mtime will be preserved even when moving across 648 attributes; mtime will be preserved even when moving across
648 filesystems. Returns true on success and false on failure. 649 filesystems. Returns true on success and false on failure.
649 """ 650 """
650 #print "copyfile("+src+","+dest+","+str(newmtime)+","+str(sstat)+")" 651 #print "copyfile(" + src + "," + dest + "," + str(newmtime) + "," + str(sstat) + ")"
651 try: 652 try:
652 if not sstat: 653 if not sstat:
653 sstat=os.lstat(src) 654 sstat = os.lstat(src)
654 except Exception, e: 655 except Exception, e:
655 print "copyfile: Stating source file failed...", e 656 print "copyfile: Stating source file failed...", e
656 return False 657 return False
657 658
658 destexists=1 659 destexists = 1
659 try: 660 try:
660 dstat=os.lstat(dest) 661 dstat = os.lstat(dest)
661 except: 662 except:
662 dstat=os.lstat(os.path.dirname(dest)) 663 dstat = os.lstat(os.path.dirname(dest))
663 destexists=0 664 destexists = 0
664 665
665 if destexists: 666 if destexists:
666 if stat.S_ISLNK(dstat[stat.ST_MODE]): 667 if stat.S_ISLNK(dstat[stat.ST_MODE]):
667 try: 668 try:
668 os.unlink(dest) 669 os.unlink(dest)
669 destexists=0 670 destexists = 0
670 except Exception, e: 671 except Exception, e:
671 pass 672 pass
672 673
673 if stat.S_ISLNK(sstat[stat.ST_MODE]): 674 if stat.S_ISLNK(sstat[stat.ST_MODE]):
674 try: 675 try:
675 target=os.readlink(src) 676 target = os.readlink(src)
676 if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]): 677 if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]):
677 os.unlink(dest) 678 os.unlink(dest)
678 os.symlink(target,dest) 679 os.symlink(target, dest)
679 #os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID]) 680 #os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID])
680 return os.lstat(dest) 681 return os.lstat(dest)
681 except Exception, e: 682 except Exception, e:
@@ -683,30 +684,30 @@ def copyfile(src,dest,newmtime=None,sstat=None):
683 return False 684 return False
684 685
685 if stat.S_ISREG(sstat[stat.ST_MODE]): 686 if stat.S_ISREG(sstat[stat.ST_MODE]):
686 try: # For safety copy then move it over. 687 try: # For safety copy then move it over.
687 shutil.copyfile(src,dest+"#new") 688 shutil.copyfile(src, dest + "#new")
688 os.rename(dest+"#new",dest) 689 os.rename(dest + "#new", dest)
689 except Exception, e: 690 except Exception, e:
690 print 'copyfile: copy', src, '->', dest, 'failed.', e 691 print 'copyfile: copy', src, '->', dest, 'failed.', e
691 return False 692 return False
692 else: 693 else:
693 #we don't yet handle special, so we need to fall back to /bin/mv 694 #we don't yet handle special, so we need to fall back to /bin/mv
694 a=getstatusoutput("/bin/cp -f "+"'"+src+"' '"+dest+"'") 695 a = getstatusoutput("/bin/cp -f " + "'" + src + "' '" + dest + "'")
695 if a[0]!=0: 696 if a[0] != 0:
696 print "copyfile: Failed to copy special file:" + src + "' to '" + dest + "'", a 697 print "copyfile: Failed to copy special file:" + src + "' to '" + dest + "'", a
697 return False # failure 698 return False # failure
698 try: 699 try:
699 os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID]) 700 os.lchown(dest, sstat[stat.ST_UID], sstat[stat.ST_GID])
700 os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown 701 os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown
701 except Exception, e: 702 except Exception, e:
702 print "copyfile: Failed to chown/chmod/unlink", dest, e 703 print "copyfile: Failed to chown/chmod/unlink", dest, e
703 return False 704 return False
704 705
705 if newmtime: 706 if newmtime:
706 os.utime(dest,(newmtime,newmtime)) 707 os.utime(dest, (newmtime, newmtime))
707 else: 708 else:
708 os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME])) 709 os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME]))
709 newmtime=sstat[stat.ST_MTIME] 710 newmtime = sstat[stat.ST_MTIME]
710 return newmtime 711 return newmtime
711 712
712def which(path, item, direction = 0): 713def which(path, item, direction = 0):