Diffstat (limited to 'bitbake')
26 files changed, 268 insertions, 269 deletions
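The hunks below repeatedly apply the same forward-compatibility cleanups to the BitBake code: "except E, e:" becomes "except E as e:", "raise E, msg" becomes "raise E(msg)", "d.has_key(k)" becomes "k in d", "type(x) is T" checks become "isinstance(x, T)", and "while 1:" becomes "while True:". A minimal sketch of the dominant pattern, mirroring the change made in lib/bb/daemonize.py below (the wrapper function itself is illustrative, not part of the patch):

    import os

    def fork_or_fail():
        try:
            pid = os.fork()
        # Old Python 2-only spelling removed throughout this patch:
        #     except OSError, e:
        #         raise Exception, "%s [%d]" % (e.strerror, e.errno)
        # Replacement spelling, accepted by Python 2.6+ and Python 3:
        except OSError as e:
            raise Exception("%s [%d]" % (e.strerror, e.errno))
        return pid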
diff --git a/bitbake/lib/bb/build.py b/bitbake/lib/bb/build.py
index 3d71013998..4f14e63ac7 100644
--- a/bitbake/lib/bb/build.py
+++ b/bitbake/lib/bb/build.py
@@ -140,7 +140,7 @@ def exec_func(func, d, dirs = None):
140 | so = os.popen("tee \"%s\"" % logfile, "w") | 140 | so = os.popen("tee \"%s\"" % logfile, "w") |
141 | else: | 141 | else: |
142 | so = file(logfile, 'w') | 142 | so = file(logfile, 'w') |
143 | except OSError, e: | 143 | except OSError as e: |
144 | bb.msg.error(bb.msg.domain.Build, "opening log file: %s" % e) | 144 | bb.msg.error(bb.msg.domain.Build, "opening log file: %s" % e) |
145 | pass | 145 | pass |
146 | 146 | ||
@@ -285,7 +285,7 @@ def exec_task(task, d):
285 | event.fire(TaskStarted(task, localdata), localdata) | 285 | event.fire(TaskStarted(task, localdata), localdata) |
286 | exec_func(task, localdata) | 286 | exec_func(task, localdata) |
287 | event.fire(TaskSucceeded(task, localdata), localdata) | 287 | event.fire(TaskSucceeded(task, localdata), localdata) |
288 | except FuncFailed, message: | 288 | except FuncFailed as message: |
289 | # Try to extract the optional logfile | 289 | # Try to extract the optional logfile |
290 | try: | 290 | try: |
291 | (msg, logfile) = message | 291 | (msg, logfile) = message |
diff --git a/bitbake/lib/bb/cache.py b/bitbake/lib/bb/cache.py
index 0d165aec2f..6e124b2e83 100644
--- a/bitbake/lib/bb/cache.py
+++ b/bitbake/lib/bb/cache.py
@@ -61,7 +61,7 @@ class Cache:
61 | return | 61 | return |
62 | 62 | ||
63 | self.has_cache = True | 63 | self.has_cache = True |
64 | self.cachefile = os.path.join(self.cachedir,"bb_cache.dat") | 64 | self.cachefile = os.path.join(self.cachedir, "bb_cache.dat") |
65 | 65 | ||
66 | bb.msg.debug(1, bb.msg.domain.Cache, "Using cache in '%s'" % self.cachedir) | 66 | bb.msg.debug(1, bb.msg.domain.Cache, "Using cache in '%s'" % self.cachedir) |
67 | try: | 67 | try: |
@@ -82,9 +82,9 @@ class Cache:
82 | p = pickle.Unpickler(file(self.cachefile, "rb")) | 82 | p = pickle.Unpickler(file(self.cachefile, "rb")) |
83 | self.depends_cache, version_data = p.load() | 83 | self.depends_cache, version_data = p.load() |
84 | if version_data['CACHE_VER'] != __cache_version__: | 84 | if version_data['CACHE_VER'] != __cache_version__: |
85 | raise ValueError, 'Cache Version Mismatch' | 85 | raise ValueError('Cache Version Mismatch') |
86 | if version_data['BITBAKE_VER'] != bb.__version__: | 86 | if version_data['BITBAKE_VER'] != bb.__version__: |
87 | raise ValueError, 'Bitbake Version Mismatch' | 87 | raise ValueError('Bitbake Version Mismatch') |
88 | except EOFError: | 88 | except EOFError: |
89 | bb.msg.note(1, bb.msg.domain.Cache, "Truncated cache found, rebuilding...") | 89 | bb.msg.note(1, bb.msg.domain.Cache, "Truncated cache found, rebuilding...") |
90 | self.depends_cache = {} | 90 | self.depends_cache = {} |
@@ -446,7 +446,7 @@ class Cache:
446 | self.getVar('__BB_DONT_CACHE', file_name, True) | 446 | self.getVar('__BB_DONT_CACHE', file_name, True) |
447 | self.getVar('__VARIANTS', file_name, True) | 447 | self.getVar('__VARIANTS', file_name, True) |
448 | 448 | ||
449 | def load_bbfile( self, bbfile , config): | 449 | def load_bbfile( self, bbfile, config): |
450 | """ | 450 | """ |
451 | Load and parse one .bb build file | 451 | Load and parse one .bb build file |
452 | Return the data and whether parsing resulted in the file being skipped | 452 | Return the data and whether parsing resulted in the file being skipped |
diff --git a/bitbake/lib/bb/daemonize.py b/bitbake/lib/bb/daemonize.py
index a944af2238..f0714b3af6 100644
--- a/bitbake/lib/bb/daemonize.py
+++ b/bitbake/lib/bb/daemonize.py
@@ -1,190 +1,190 @@
1 | """ | 1 | """ |
2 | Python Deamonizing helper | 2 | Python Deamonizing helper |
3 | 3 | ||
4 | Configurable daemon behaviors: | 4 | Configurable daemon behaviors: |
5 | 5 | ||
6 | 1.) The current working directory set to the "/" directory. | 6 | 1.) The current working directory set to the "/" directory. |
7 | 2.) The current file creation mode mask set to 0. | 7 | 2.) The current file creation mode mask set to 0. |
8 | 3.) Close all open files (1024). | 8 | 3.) Close all open files (1024). |
9 | 4.) Redirect standard I/O streams to "/dev/null". | 9 | 4.) Redirect standard I/O streams to "/dev/null". |
10 | 10 | ||
11 | A failed call to fork() now raises an exception. | 11 | A failed call to fork() now raises an exception. |
12 | 12 | ||
13 | References: | 13 | References: |
14 | 1) Advanced Programming in the Unix Environment: W. Richard Stevens | 14 | 1) Advanced Programming in the Unix Environment: W. Richard Stevens |
15 | 2) Unix Programming Frequently Asked Questions: | 15 | 2) Unix Programming Frequently Asked Questions: |
16 | http://www.erlenstar.demon.co.uk/unix/faq_toc.html | 16 | http://www.erlenstar.demon.co.uk/unix/faq_toc.html |
17 | 17 | ||
18 | Modified to allow a function to be daemonized and return for | 18 | Modified to allow a function to be daemonized and return for |
19 | bitbake use by Richard Purdie | 19 | bitbake use by Richard Purdie |
20 | """ | 20 | """ |
21 | 21 | ||
22 | __author__ = "Chad J. Schroeder" | 22 | __author__ = "Chad J. Schroeder" |
23 | __copyright__ = "Copyright (C) 2005 Chad J. Schroeder" | 23 | __copyright__ = "Copyright (C) 2005 Chad J. Schroeder" |
24 | __version__ = "0.2" | 24 | __version__ = "0.2" |
25 | 25 | ||
26 | # Standard Python modules. | 26 | # Standard Python modules. |
27 | import os # Miscellaneous OS interfaces. | 27 | import os # Miscellaneous OS interfaces. |
28 | import sys # System-specific parameters and functions. | 28 | import sys # System-specific parameters and functions. |
29 | 29 | ||
30 | # Default daemon parameters. | 30 | # Default daemon parameters. |
31 | # File mode creation mask of the daemon. | 31 | # File mode creation mask of the daemon. |
32 | # For BitBake's children, we do want to inherit the parent umask. | 32 | # For BitBake's children, we do want to inherit the parent umask. |
33 | UMASK = None | 33 | UMASK = None |
34 | 34 | ||
35 | # Default maximum for the number of available file descriptors. | 35 | # Default maximum for the number of available file descriptors. |
36 | MAXFD = 1024 | 36 | MAXFD = 1024 |
37 | 37 | ||
38 | # The standard I/O file descriptors are redirected to /dev/null by default. | 38 | # The standard I/O file descriptors are redirected to /dev/null by default. |
39 | if (hasattr(os, "devnull")): | 39 | if (hasattr(os, "devnull")): |
40 | REDIRECT_TO = os.devnull | 40 | REDIRECT_TO = os.devnull |
41 | else: | 41 | else: |
42 | REDIRECT_TO = "/dev/null" | 42 | REDIRECT_TO = "/dev/null" |
43 | 43 | ||
44 | def createDaemon(function, logfile): | 44 | def createDaemon(function, logfile): |
45 | """ | 45 | """ |
46 | Detach a process from the controlling terminal and run it in the | 46 | Detach a process from the controlling terminal and run it in the |
47 | background as a daemon, returning control to the caller. | 47 | background as a daemon, returning control to the caller. |
48 | """ | 48 | """ |
49 | 49 | ||
50 | try: | 50 | try: |
51 | # Fork a child process so the parent can exit. This returns control to | 51 | # Fork a child process so the parent can exit. This returns control to |
52 | # the command-line or shell. It also guarantees that the child will not | 52 | # the command-line or shell. It also guarantees that the child will not |
53 | # be a process group leader, since the child receives a new process ID | 53 | # be a process group leader, since the child receives a new process ID |
54 | # and inherits the parent's process group ID. This step is required | 54 | # and inherits the parent's process group ID. This step is required |
55 | # to insure that the next call to os.setsid is successful. | 55 | # to insure that the next call to os.setsid is successful. |
56 | pid = os.fork() | 56 | pid = os.fork() |
57 | except OSError, e: | 57 | except OSError as e: |
58 | raise Exception, "%s [%d]" % (e.strerror, e.errno) | 58 | raise Exception("%s [%d]" % (e.strerror, e.errno)) |
59 | 59 | ||
60 | if (pid == 0): # The first child. | 60 | if (pid == 0): # The first child. |
61 | # To become the session leader of this new session and the process group | 61 | # To become the session leader of this new session and the process group |
62 | # leader of the new process group, we call os.setsid(). The process is | 62 | # leader of the new process group, we call os.setsid(). The process is |
63 | # also guaranteed not to have a controlling terminal. | 63 | # also guaranteed not to have a controlling terminal. |
64 | os.setsid() | 64 | os.setsid() |
65 | 65 | ||
66 | # Is ignoring SIGHUP necessary? | 66 | # Is ignoring SIGHUP necessary? |
67 | # | 67 | # |
68 | # It's often suggested that the SIGHUP signal should be ignored before | 68 | # It's often suggested that the SIGHUP signal should be ignored before |
69 | # the second fork to avoid premature termination of the process. The | 69 | # the second fork to avoid premature termination of the process. The |
70 | # reason is that when the first child terminates, all processes, e.g. | 70 | # reason is that when the first child terminates, all processes, e.g. |
71 | # the second child, in the orphaned group will be sent a SIGHUP. | 71 | # the second child, in the orphaned group will be sent a SIGHUP. |
72 | # | 72 | # |
73 | # "However, as part of the session management system, there are exactly | 73 | # "However, as part of the session management system, there are exactly |
74 | # two cases where SIGHUP is sent on the death of a process: | 74 | # two cases where SIGHUP is sent on the death of a process: |
75 | # | 75 | # |
76 | # 1) When the process that dies is the session leader of a session that | 76 | # 1) When the process that dies is the session leader of a session that |
77 | # is attached to a terminal device, SIGHUP is sent to all processes | 77 | # is attached to a terminal device, SIGHUP is sent to all processes |
78 | # in the foreground process group of that terminal device. | 78 | # in the foreground process group of that terminal device. |
79 | # 2) When the death of a process causes a process group to become | 79 | # 2) When the death of a process causes a process group to become |
80 | # orphaned, and one or more processes in the orphaned group are | 80 | # orphaned, and one or more processes in the orphaned group are |
81 | # stopped, then SIGHUP and SIGCONT are sent to all members of the | 81 | # stopped, then SIGHUP and SIGCONT are sent to all members of the |
82 | # orphaned group." [2] | 82 | # orphaned group." [2] |
83 | # | 83 | # |
84 | # The first case can be ignored since the child is guaranteed not to have | 84 | # The first case can be ignored since the child is guaranteed not to have |
85 | # a controlling terminal. The second case isn't so easy to dismiss. | 85 | # a controlling terminal. The second case isn't so easy to dismiss. |
86 | # The process group is orphaned when the first child terminates and | 86 | # The process group is orphaned when the first child terminates and |
87 | # POSIX.1 requires that every STOPPED process in an orphaned process | 87 | # POSIX.1 requires that every STOPPED process in an orphaned process |
88 | # group be sent a SIGHUP signal followed by a SIGCONT signal. Since the | 88 | # group be sent a SIGHUP signal followed by a SIGCONT signal. Since the |
89 | # second child is not STOPPED though, we can safely forego ignoring the | 89 | # second child is not STOPPED though, we can safely forego ignoring the |
90 | # SIGHUP signal. In any case, there are no ill-effects if it is ignored. | 90 | # SIGHUP signal. In any case, there are no ill-effects if it is ignored. |
91 | # | 91 | # |
92 | # import signal # Set handlers for asynchronous events. | 92 | # import signal # Set handlers for asynchronous events. |
93 | # signal.signal(signal.SIGHUP, signal.SIG_IGN) | 93 | # signal.signal(signal.SIGHUP, signal.SIG_IGN) |
94 | 94 | ||
95 | try: | 95 | try: |
96 | # Fork a second child and exit immediately to prevent zombies. This | 96 | # Fork a second child and exit immediately to prevent zombies. This |
97 | # causes the second child process to be orphaned, making the init | 97 | # causes the second child process to be orphaned, making the init |
98 | # process responsible for its cleanup. And, since the first child is | 98 | # process responsible for its cleanup. And, since the first child is |
99 | # a session leader without a controlling terminal, it's possible for | 99 | # a session leader without a controlling terminal, it's possible for |
100 | # it to acquire one by opening a terminal in the future (System V- | 100 | # it to acquire one by opening a terminal in the future (System V- |
101 | # based systems). This second fork guarantees that the child is no | 101 | # based systems). This second fork guarantees that the child is no |
102 | # longer a session leader, preventing the daemon from ever acquiring | 102 | # longer a session leader, preventing the daemon from ever acquiring |
103 | # a controlling terminal. | 103 | # a controlling terminal. |
104 | pid = os.fork() # Fork a second child. | 104 | pid = os.fork() # Fork a second child. |
105 | except OSError, e: | 105 | except OSError as e: |
106 | raise Exception, "%s [%d]" % (e.strerror, e.errno) | 106 | raise Exception("%s [%d]" % (e.strerror, e.errno)) |
107 | 107 | ||
108 | if (pid == 0): # The second child. | 108 | if (pid == 0): # The second child. |
109 | # We probably don't want the file mode creation mask inherited from | 109 | # We probably don't want the file mode creation mask inherited from |
110 | # the parent, so we give the child complete control over permissions. | 110 | # the parent, so we give the child complete control over permissions. |
111 | if UMASK is not None: | 111 | if UMASK is not None: |
112 | os.umask(UMASK) | 112 | os.umask(UMASK) |
113 | else: | 113 | else: |
114 | # Parent (the first child) of the second child. | 114 | # Parent (the first child) of the second child. |
115 | os._exit(0) | 115 | os._exit(0) |
116 | else: | 116 | else: |
117 | # exit() or _exit()? | 117 | # exit() or _exit()? |
118 | # _exit is like exit(), but it doesn't call any functions registered | 118 | # _exit is like exit(), but it doesn't call any functions registered |
119 | # with atexit (and on_exit) or any registered signal handlers. It also | 119 | # with atexit (and on_exit) or any registered signal handlers. It also |
120 | # closes any open file descriptors. Using exit() may cause all stdio | 120 | # closes any open file descriptors. Using exit() may cause all stdio |
121 | # streams to be flushed twice and any temporary files may be unexpectedly | 121 | # streams to be flushed twice and any temporary files may be unexpectedly |
122 | # removed. It's therefore recommended that child branches of a fork() | 122 | # removed. It's therefore recommended that child branches of a fork() |
123 | # and the parent branch(es) of a daemon use _exit(). | 123 | # and the parent branch(es) of a daemon use _exit(). |
124 | return | 124 | return |
125 | 125 | ||
126 | # Close all open file descriptors. This prevents the child from keeping | 126 | # Close all open file descriptors. This prevents the child from keeping |
127 | # open any file descriptors inherited from the parent. There is a variety | 127 | # open any file descriptors inherited from the parent. There is a variety |
128 | # of methods to accomplish this task. Three are listed below. | 128 | # of methods to accomplish this task. Three are listed below. |
129 | # | 129 | # |
130 | # Try the system configuration variable, SC_OPEN_MAX, to obtain the maximum | 130 | # Try the system configuration variable, SC_OPEN_MAX, to obtain the maximum |
131 | # number of open file descriptors to close. If it doesn't exists, use | 131 | # number of open file descriptors to close. If it doesn't exists, use |
132 | # the default value (configurable). | 132 | # the default value (configurable). |
133 | # | 133 | # |
134 | # try: | 134 | # try: |
135 | # maxfd = os.sysconf("SC_OPEN_MAX") | 135 | # maxfd = os.sysconf("SC_OPEN_MAX") |
136 | # except (AttributeError, ValueError): | 136 | # except (AttributeError, ValueError): |
137 | # maxfd = MAXFD | 137 | # maxfd = MAXFD |
138 | # | 138 | # |
139 | # OR | 139 | # OR |
140 | # | 140 | # |
141 | # if (os.sysconf_names.has_key("SC_OPEN_MAX")): | 141 | # if (os.sysconf_names.has_key("SC_OPEN_MAX")): |
142 | # maxfd = os.sysconf("SC_OPEN_MAX") | 142 | # maxfd = os.sysconf("SC_OPEN_MAX") |
143 | # else: | 143 | # else: |
144 | # maxfd = MAXFD | 144 | # maxfd = MAXFD |
145 | # | 145 | # |
146 | # OR | 146 | # OR |
147 | # | 147 | # |
148 | # Use the getrlimit method to retrieve the maximum file descriptor number | 148 | # Use the getrlimit method to retrieve the maximum file descriptor number |
149 | # that can be opened by this process. If there is not limit on the | 149 | # that can be opened by this process. If there is not limit on the |
150 | # resource, use the default value. | 150 | # resource, use the default value. |
151 | # | 151 | # |
152 | import resource # Resource usage information. | 152 | import resource # Resource usage information. |
153 | maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1] | 153 | maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1] |
154 | if (maxfd == resource.RLIM_INFINITY): | 154 | if (maxfd == resource.RLIM_INFINITY): |
155 | maxfd = MAXFD | 155 | maxfd = MAXFD |
156 | 156 | ||
157 | # Iterate through and close all file descriptors. | 157 | # Iterate through and close all file descriptors. |
158 | # for fd in range(0, maxfd): | 158 | # for fd in range(0, maxfd): |
159 | # try: | 159 | # try: |
160 | # os.close(fd) | 160 | # os.close(fd) |
161 | # except OSError: # ERROR, fd wasn't open to begin with (ignored) | 161 | # except OSError: # ERROR, fd wasn't open to begin with (ignored) |
162 | # pass | 162 | # pass |
163 | 163 | ||
164 | # Redirect the standard I/O file descriptors to the specified file. Since | 164 | # Redirect the standard I/O file descriptors to the specified file. Since |
165 | # the daemon has no controlling terminal, most daemons redirect stdin, | 165 | # the daemon has no controlling terminal, most daemons redirect stdin, |
166 | # stdout, and stderr to /dev/null. This is done to prevent side-effects | 166 | # stdout, and stderr to /dev/null. This is done to prevent side-effects |
167 | # from reads and writes to the standard I/O file descriptors. | 167 | # from reads and writes to the standard I/O file descriptors. |
168 | 168 | ||
169 | # This call to open is guaranteed to return the lowest file descriptor, | 169 | # This call to open is guaranteed to return the lowest file descriptor, |
170 | # which will be 0 (stdin), since it was closed above. | 170 | # which will be 0 (stdin), since it was closed above. |
171 | # os.open(REDIRECT_TO, os.O_RDWR) # standard input (0) | 171 | # os.open(REDIRECT_TO, os.O_RDWR) # standard input (0) |
172 | 172 | ||
173 | # Duplicate standard input to standard output and standard error. | 173 | # Duplicate standard input to standard output and standard error. |
174 | # os.dup2(0, 1) # standard output (1) | 174 | # os.dup2(0, 1) # standard output (1) |
175 | # os.dup2(0, 2) # standard error (2) | 175 | # os.dup2(0, 2) # standard error (2) |
176 | 176 | ||
177 | 177 | ||
178 | si = file('/dev/null', 'r') | 178 | si = file('/dev/null', 'r') |
179 | so = file(logfile, 'w') | 179 | so = file(logfile, 'w') |
180 | se = so | 180 | se = so |
181 | 181 | ||
182 | 182 | ||
183 | # Replace those fds with our own | 183 | # Replace those fds with our own |
184 | os.dup2(si.fileno(), sys.stdin.fileno()) | 184 | os.dup2(si.fileno(), sys.stdin.fileno()) |
185 | os.dup2(so.fileno(), sys.stdout.fileno()) | 185 | os.dup2(so.fileno(), sys.stdout.fileno()) |
186 | os.dup2(se.fileno(), sys.stderr.fileno()) | 186 | os.dup2(se.fileno(), sys.stderr.fileno()) |
187 | 187 | ||
188 | function() | 188 | function() |
189 | 189 | ||
190 | os._exit(0) | 190 | os._exit(0) |
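A brief usage sketch for the daemonize helper above (hypothetical caller, not part of the patch): createDaemon() returns in the original process, while the detached grandchild redirects its standard streams, runs the supplied function, and finishes with os._exit(0).

    import bb.daemonize

    def worker():
        # Runs detached from the controlling terminal; stdout/stderr go to the log file.
        print("hello from the daemonized worker")

    bb.daemonize.createDaemon(worker, "/tmp/bitbake-worker.log")
    # The caller continues here; worker() executes only in the daemonized child.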
diff --git a/bitbake/lib/bb/data.py b/bitbake/lib/bb/data.py
index 85de6bfeb3..e401c53429 100644
--- a/bitbake/lib/bb/data.py
+++ b/bitbake/lib/bb/data.py
@@ -193,7 +193,7 @@ def emit_var(var, o=sys.__stdout__, d = init(), all=False):
193 | if all: | 193 | if all: |
194 | o.write('# %s=%s\n' % (var, oval)) | 194 | o.write('# %s=%s\n' % (var, oval)) |
195 | 195 | ||
196 | if type(val) is not types.StringType: | 196 | if not isinstance(val, types.StringType): |
197 | return 0 | 197 | return 0 |
198 | 198 | ||
199 | if (var.find("-") != -1 or var.find(".") != -1 or var.find('{') != -1 or var.find('}') != -1 or var.find('+') != -1) and not all: | 199 | if (var.find("-") != -1 or var.find(".") != -1 or var.find('{') != -1 or var.find('}') != -1 or var.find('+') != -1) and not all: |
diff --git a/bitbake/lib/bb/data_smart.py b/bitbake/lib/bb/data_smart.py
index 2edeec064e..1704ed631c 100644
--- a/bitbake/lib/bb/data_smart.py
+++ b/bitbake/lib/bb/data_smart.py
@@ -66,10 +66,10 @@ class DataSmart:
66 | code = match.group()[3:-1] | 66 | code = match.group()[3:-1] |
67 | codeobj = compile(code.strip(), varname or "<expansion>", "eval") | 67 | codeobj = compile(code.strip(), varname or "<expansion>", "eval") |
68 | s = utils.better_eval(codeobj, {"d": self}) | 68 | s = utils.better_eval(codeobj, {"d": self}) |
69 | if type(s) == types.IntType: s = str(s) | 69 | if isinstance(s, types.IntType): s = str(s) |
70 | return s | 70 | return s |
71 | 71 | ||
72 | if type(s) is not types.StringType: # sanity check | 72 | if not isinstance(s, types.StringType): # sanity check |
73 | return s | 73 | return s |
74 | 74 | ||
75 | if varname and varname in self.expand_cache: | 75 | if varname and varname in self.expand_cache: |
@@ -81,7 +81,7 @@ class DataSmart:
81 | s = __expand_var_regexp__.sub(var_sub, s) | 81 | s = __expand_var_regexp__.sub(var_sub, s) |
82 | s = __expand_python_regexp__.sub(python_sub, s) | 82 | s = __expand_python_regexp__.sub(python_sub, s) |
83 | if s == olds: break | 83 | if s == olds: break |
84 | if type(s) is not types.StringType: # sanity check | 84 | if not isinstance(s, types.StringType): # sanity check |
85 | bb.msg.error(bb.msg.domain.Data, 'expansion of %s returned non-string %s' % (olds, s)) | 85 | bb.msg.error(bb.msg.domain.Data, 'expansion of %s returned non-string %s' % (olds, s)) |
86 | except KeyboardInterrupt: | 86 | except KeyboardInterrupt: |
87 | raise | 87 | raise |
@@ -118,7 +118,7 @@ class DataSmart:
118 | l = len(o)+1 | 118 | l = len(o)+1 |
119 | 119 | ||
120 | # see if one should even try | 120 | # see if one should even try |
121 | if not self._seen_overrides.has_key(o): | 121 | if o not in self._seen_overrides: |
122 | continue | 122 | continue |
123 | 123 | ||
124 | vars = self._seen_overrides[o] | 124 | vars = self._seen_overrides[o] |
@@ -130,7 +130,7 @@ class DataSmart:
130 | bb.msg.note(1, bb.msg.domain.Data, "Untracked delVar") | 130 | bb.msg.note(1, bb.msg.domain.Data, "Untracked delVar") |
131 | 131 | ||
132 | # now on to the appends and prepends | 132 | # now on to the appends and prepends |
133 | if self._special_values.has_key("_append"): | 133 | if "_append" in self._special_values: |
134 | appends = self._special_values['_append'] or [] | 134 | appends = self._special_values['_append'] or [] |
135 | for append in appends: | 135 | for append in appends: |
136 | for (a, o) in self.getVarFlag(append, '_append') or []: | 136 | for (a, o) in self.getVarFlag(append, '_append') or []: |
@@ -145,7 +145,7 @@ class DataSmart:
145 | self.setVar(append, sval) | 145 | self.setVar(append, sval) |
146 | 146 | ||
147 | 147 | ||
148 | if self._special_values.has_key("_prepend"): | 148 | if "_prepend" in self._special_values: |
149 | prepends = self._special_values['_prepend'] or [] | 149 | prepends = self._special_values['_prepend'] or [] |
150 | 150 | ||
151 | for prepend in prepends: | 151 | for prepend in prepends: |
@@ -215,7 +215,7 @@ class DataSmart:
215 | # more cookies for the cookie monster | 215 | # more cookies for the cookie monster |
216 | if '_' in var: | 216 | if '_' in var: |
217 | override = var[var.rfind('_')+1:] | 217 | override = var[var.rfind('_')+1:] |
218 | if not self._seen_overrides.has_key(override): | 218 | if override not in self._seen_overrides: |
219 | self._seen_overrides[override] = set() | 219 | self._seen_overrides[override] = set() |
220 | self._seen_overrides[override].add( var ) | 220 | self._seen_overrides[override].add( var ) |
221 | 221 | ||
@@ -246,7 +246,7 @@ class DataSmart:
246 | dest.extend(src) | 246 | dest.extend(src) |
247 | self.setVarFlag(newkey, i, dest) | 247 | self.setVarFlag(newkey, i, dest) |
248 | 248 | ||
249 | if self._special_values.has_key(i) and key in self._special_values[i]: | 249 | if i in self._special_values and key in self._special_values[i]: |
250 | self._special_values[i].remove(key) | 250 | self._special_values[i].remove(key) |
251 | self._special_values[i].add(newkey) | 251 | self._special_values[i].add(newkey) |
252 | 252 | ||
diff --git a/bitbake/lib/bb/fetch/cvs.py b/bitbake/lib/bb/fetch/cvs.py
index c0d43618f9..61976f7ef4 100644
--- a/bitbake/lib/bb/fetch/cvs.py
+++ b/bitbake/lib/bb/fetch/cvs.py
@@ -139,8 +139,8 @@ class Cvs(Fetch):
139 | bb.msg.debug(2, bb.msg.domain.Fetcher, "Fetch: checking for module directory") | 139 | bb.msg.debug(2, bb.msg.domain.Fetcher, "Fetch: checking for module directory") |
140 | pkg = data.expand('${PN}', d) | 140 | pkg = data.expand('${PN}', d) |
141 | pkgdir = os.path.join(data.expand('${CVSDIR}', localdata), pkg) | 141 | pkgdir = os.path.join(data.expand('${CVSDIR}', localdata), pkg) |
142 | moddir = os.path.join(pkgdir,localdir) | 142 | moddir = os.path.join(pkgdir, localdir) |
143 | if os.access(os.path.join(moddir,'CVS'), os.R_OK): | 143 | if os.access(os.path.join(moddir, 'CVS'), os.R_OK): |
144 | bb.msg.note(1, bb.msg.domain.Fetcher, "Update " + loc) | 144 | bb.msg.note(1, bb.msg.domain.Fetcher, "Update " + loc) |
145 | # update sources there | 145 | # update sources there |
146 | os.chdir(moddir) | 146 | os.chdir(moddir) |
diff --git a/bitbake/lib/bb/fetch/perforce.py b/bitbake/lib/bb/fetch/perforce.py
index 67de6f59fa..5b6c601876 100644
--- a/bitbake/lib/bb/fetch/perforce.py
+++ b/bitbake/lib/bb/fetch/perforce.py
@@ -35,15 +35,15 @@ class Perforce(Fetch):
35 | def supports(self, url, ud, d): | 35 | def supports(self, url, ud, d): |
36 | return ud.type in ['p4'] | 36 | return ud.type in ['p4'] |
37 | 37 | ||
38 | def doparse(url,d): | 38 | def doparse(url, d): |
39 | parm = {} | 39 | parm = {} |
40 | path = url.split("://")[1] | 40 | path = url.split("://")[1] |
41 | delim = path.find("@"); | 41 | delim = path.find("@"); |
42 | if delim != -1: | 42 | if delim != -1: |
43 | (user,pswd,host,port) = path.split('@')[0].split(":") | 43 | (user, pswd, host, port) = path.split('@')[0].split(":") |
44 | path = path.split('@')[1] | 44 | path = path.split('@')[1] |
45 | else: | 45 | else: |
46 | (host,port) = data.getVar('P4PORT', d).split(':') | 46 | (host, port) = data.getVar('P4PORT', d).split(':') |
47 | user = "" | 47 | user = "" |
48 | pswd = "" | 48 | pswd = "" |
49 | 49 | ||
@@ -53,19 +53,19 @@ class Perforce(Fetch):
53 | plist = path.split(';') | 53 | plist = path.split(';') |
54 | for item in plist: | 54 | for item in plist: |
55 | if item.count('='): | 55 | if item.count('='): |
56 | (key,value) = item.split('=') | 56 | (key, value) = item.split('=') |
57 | keys.append(key) | 57 | keys.append(key) |
58 | values.append(value) | 58 | values.append(value) |
59 | 59 | ||
60 | parm = dict(zip(keys,values)) | 60 | parm = dict(zip(keys, values)) |
61 | path = "//" + path.split(';')[0] | 61 | path = "//" + path.split(';')[0] |
62 | host += ":%s" % (port) | 62 | host += ":%s" % (port) |
63 | parm["cset"] = Perforce.getcset(d, path, host, user, pswd, parm) | 63 | parm["cset"] = Perforce.getcset(d, path, host, user, pswd, parm) |
64 | 64 | ||
65 | return host,path,user,pswd,parm | 65 | return host, path, user, pswd, parm |
66 | doparse = staticmethod(doparse) | 66 | doparse = staticmethod(doparse) |
67 | 67 | ||
68 | def getcset(d, depot,host,user,pswd,parm): | 68 | def getcset(d, depot, host, user, pswd, parm): |
69 | p4opt = "" | 69 | p4opt = "" |
70 | if "cset" in parm: | 70 | if "cset" in parm: |
71 | return parm["cset"]; | 71 | return parm["cset"]; |
@@ -97,7 +97,7 @@ class Perforce(Fetch):
97 | 97 | ||
98 | def localpath(self, url, ud, d): | 98 | def localpath(self, url, ud, d): |
99 | 99 | ||
100 | (host,path,user,pswd,parm) = Perforce.doparse(url,d) | 100 | (host, path, user, pswd, parm) = Perforce.doparse(url, d) |
101 | 101 | ||
102 | # If a label is specified, we use that as our filename | 102 | # If a label is specified, we use that as our filename |
103 | 103 | ||
@@ -115,7 +115,7 @@ class Perforce(Fetch):
115 | 115 | ||
116 | cset = Perforce.getcset(d, path, host, user, pswd, parm) | 116 | cset = Perforce.getcset(d, path, host, user, pswd, parm) |
117 | 117 | ||
118 | ud.localfile = data.expand('%s+%s+%s.tar.gz' % (host,base.replace('/', '.'), cset), d) | 118 | ud.localfile = data.expand('%s+%s+%s.tar.gz' % (host, base.replace('/', '.'), cset), d) |
119 | 119 | ||
120 | return os.path.join(data.getVar("DL_DIR", d, 1), ud.localfile) | 120 | return os.path.join(data.getVar("DL_DIR", d, 1), ud.localfile) |
121 | 121 | ||
@@ -124,7 +124,7 @@ class Perforce(Fetch):
124 | Fetch urls | 124 | Fetch urls |
125 | """ | 125 | """ |
126 | 126 | ||
127 | (host,depot,user,pswd,parm) = Perforce.doparse(loc, d) | 127 | (host, depot, user, pswd, parm) = Perforce.doparse(loc, d) |
128 | 128 | ||
129 | if depot.find('/...') != -1: | 129 | if depot.find('/...') != -1: |
130 | path = depot[:depot.find('/...')] | 130 | path = depot[:depot.find('/...')] |
@@ -164,10 +164,10 @@ class Perforce(Fetch):
164 | raise FetchError(module) | 164 | raise FetchError(module) |
165 | 165 | ||
166 | if "label" in parm: | 166 | if "label" in parm: |
167 | depot = "%s@%s" % (depot,parm["label"]) | 167 | depot = "%s@%s" % (depot, parm["label"]) |
168 | else: | 168 | else: |
169 | cset = Perforce.getcset(d, depot, host, user, pswd, parm) | 169 | cset = Perforce.getcset(d, depot, host, user, pswd, parm) |
170 | depot = "%s@%s" % (depot,cset) | 170 | depot = "%s@%s" % (depot, cset) |
171 | 171 | ||
172 | os.chdir(tmpfile) | 172 | os.chdir(tmpfile) |
173 | bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc) | 173 | bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc) |
@@ -189,7 +189,7 @@ class Perforce(Fetch):
189 | dest = list[0][len(path)+1:] | 189 | dest = list[0][len(path)+1:] |
190 | where = dest.find("#") | 190 | where = dest.find("#") |
191 | 191 | ||
192 | os.system("%s%s print -o %s/%s %s" % (p4cmd, p4opt, module,dest[:where],list[0])) | 192 | os.system("%s%s print -o %s/%s %s" % (p4cmd, p4opt, module, dest[:where], list[0])) |
193 | count = count + 1 | 193 | count = count + 1 |
194 | 194 | ||
195 | if count == 0: | 195 | if count == 0: |
diff --git a/bitbake/lib/bb/fetch/wget.py b/bitbake/lib/bb/fetch/wget.py
index 8b687372a4..581362038a 100644
--- a/bitbake/lib/bb/fetch/wget.py
+++ b/bitbake/lib/bb/fetch/wget.py
@@ -38,7 +38,7 @@ class Wget(Fetch):
38 | """ | 38 | """ |
39 | Check to see if a given url can be fetched with wget. | 39 | Check to see if a given url can be fetched with wget. |
40 | """ | 40 | """ |
41 | return ud.type in ['http','https','ftp'] | 41 | return ud.type in ['http', 'https', 'ftp'] |
42 | 42 | ||
43 | def localpath(self, url, ud, d): | 43 | def localpath(self, url, ud, d): |
44 | 44 | ||
diff --git a/bitbake/lib/bb/parse/__init__.py b/bitbake/lib/bb/parse/__init__.py
index adc1408b9e..4b957884cc 100644
--- a/bitbake/lib/bb/parse/__init__.py
+++ b/bitbake/lib/bb/parse/__init__.py
@@ -37,12 +37,12 @@ class SkipPackage(Exception):
37 | 37 | ||
38 | __mtime_cache = {} | 38 | __mtime_cache = {} |
39 | def cached_mtime(f): | 39 | def cached_mtime(f): |
40 | if not __mtime_cache.has_key(f): | 40 | if f not in __mtime_cache: |
41 | __mtime_cache[f] = os.stat(f)[8] | 41 | __mtime_cache[f] = os.stat(f)[8] |
42 | return __mtime_cache[f] | 42 | return __mtime_cache[f] |
43 | 43 | ||
44 | def cached_mtime_noerror(f): | 44 | def cached_mtime_noerror(f): |
45 | if not __mtime_cache.has_key(f): | 45 | if f not in __mtime_cache: |
46 | try: | 46 | try: |
47 | __mtime_cache[f] = os.stat(f)[8] | 47 | __mtime_cache[f] = os.stat(f)[8] |
48 | except OSError: | 48 | except OSError: |
diff --git a/bitbake/lib/bb/parse/ast.py b/bitbake/lib/bb/parse/ast.py
index a586c5cde1..e7d389e7a5 100644
--- a/bitbake/lib/bb/parse/ast.py
+++ b/bitbake/lib/bb/parse/ast.py
@@ -311,7 +311,7 @@ def finalize(fn, d):
311 | all_handlers = {} | 311 | all_handlers = {} |
312 | for var in bb.data.getVar('__BBHANDLERS', d) or []: | 312 | for var in bb.data.getVar('__BBHANDLERS', d) or []: |
313 | # try to add the handler | 313 | # try to add the handler |
314 | handler = bb.data.getVar(var,d) | 314 | handler = bb.data.getVar(var, d) |
315 | bb.event.register(var, handler) | 315 | bb.event.register(var, handler) |
316 | 316 | ||
317 | tasklist = bb.data.getVar('__BBTASKS', d) or [] | 317 | tasklist = bb.data.getVar('__BBTASKS', d) or [] |
diff --git a/bitbake/lib/bb/parse/parse_py/BBHandler.py b/bitbake/lib/bb/parse/parse_py/BBHandler.py
index a770131fbc..a388773bb7 100644
--- a/bitbake/lib/bb/parse/parse_py/BBHandler.py
+++ b/bitbake/lib/bb/parse/parse_py/BBHandler.py
@@ -90,7 +90,7 @@ def get_statements(filename, absolsute_filename, base_name):
90 | statements = ast.StatementGroup() | 90 | statements = ast.StatementGroup() |
91 | 91 | ||
92 | lineno = 0 | 92 | lineno = 0 |
93 | while 1: | 93 | while True: |
94 | lineno = lineno + 1 | 94 | lineno = lineno + 1 |
95 | s = file.readline() | 95 | s = file.readline() |
96 | if not s: break | 96 | if not s: break |
@@ -118,7 +118,7 @@ def handle(fn, d, include):
118 | bb.msg.debug(2, bb.msg.domain.Parsing, "BB " + fn + ": handle(data, include)") | 118 | bb.msg.debug(2, bb.msg.domain.Parsing, "BB " + fn + ": handle(data, include)") |
119 | 119 | ||
120 | (root, ext) = os.path.splitext(os.path.basename(fn)) | 120 | (root, ext) = os.path.splitext(os.path.basename(fn)) |
121 | base_name = "%s%s" % (root,ext) | 121 | base_name = "%s%s" % (root, ext) |
122 | init(d) | 122 | init(d) |
123 | 123 | ||
124 | if ext == ".bbclass": | 124 | if ext == ".bbclass": |
@@ -164,7 +164,7 @@ def handle(fn, d, include):
164 | return d | 164 | return d |
165 | 165 | ||
166 | def feeder(lineno, s, fn, root, statements): | 166 | def feeder(lineno, s, fn, root, statements): |
167 | global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __def_regexp__, __python_func_regexp__, __inpython__,__infunc__, __body__, classes, bb, __residue__ | 167 | global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __def_regexp__, __python_func_regexp__, __inpython__, __infunc__, __body__, classes, bb, __residue__ |
168 | if __infunc__: | 168 | if __infunc__: |
169 | if s == '}': | 169 | if s == '}': |
170 | __body__.append('') | 170 | __body__.append('') |
diff --git a/bitbake/lib/bb/parse/parse_py/ConfHandler.py b/bitbake/lib/bb/parse/parse_py/ConfHandler.py
index 8e17182ba7..9188119e4d 100644
--- a/bitbake/lib/bb/parse/parse_py/ConfHandler.py
+++ b/bitbake/lib/bb/parse/parse_py/ConfHandler.py
@@ -89,7 +89,7 @@ def handle(fn, data, include):
89 | 89 | ||
90 | statements = ast.StatementGroup() | 90 | statements = ast.StatementGroup() |
91 | lineno = 0 | 91 | lineno = 0 |
92 | while 1: | 92 | while True: |
93 | lineno = lineno + 1 | 93 | lineno = lineno + 1 |
94 | s = f.readline() | 94 | s = f.readline() |
95 | if not s: break | 95 | if not s: break |
diff --git a/bitbake/lib/bb/persist_data.py b/bitbake/lib/bb/persist_data.py
index a26244510a..80ddeb5560 100644
--- a/bitbake/lib/bb/persist_data.py
+++ b/bitbake/lib/bb/persist_data.py
@@ -52,7 +52,7 @@ class PersistData:
52 | except OSError: | 52 | except OSError: |
53 | bb.utils.mkdirhier(self.cachedir) | 53 | bb.utils.mkdirhier(self.cachedir) |
54 | 54 | ||
55 | self.cachefile = os.path.join(self.cachedir,"bb_persist_data.sqlite3") | 55 | self.cachefile = os.path.join(self.cachedir, "bb_persist_data.sqlite3") |
56 | bb.msg.debug(1, bb.msg.domain.PersistData, "Using '%s' as the persistent data cache" % self.cachefile) | 56 | bb.msg.debug(1, bb.msg.domain.PersistData, "Using '%s' as the persistent data cache" % self.cachefile) |
57 | 57 | ||
58 | self.connection = sqlite3.connect(self.cachefile, timeout=5, isolation_level=None) | 58 | self.connection = sqlite3.connect(self.cachefile, timeout=5, isolation_level=None) |
@@ -113,7 +113,7 @@ class PersistData:
113 | try: | 113 | try: |
114 | self.connection.execute(*query) | 114 | self.connection.execute(*query) |
115 | return | 115 | return |
116 | except sqlite3.OperationalError, e: | 116 | except sqlite3.OperationalError as e: |
117 | if 'database is locked' in str(e): | 117 | if 'database is locked' in str(e): |
118 | continue | 118 | continue |
119 | raise | 119 | raise |
diff --git a/bitbake/lib/bb/runqueue.py b/bitbake/lib/bb/runqueue.py
index de1160eb87..6025142e08 100644
--- a/bitbake/lib/bb/runqueue.py
+++ b/bitbake/lib/bb/runqueue.py
@@ -109,8 +109,7 @@ class RunQueueSchedulerSpeed(RunQueueScheduler):
109 | 109 | ||
110 | self.rq = runqueue | 110 | self.rq = runqueue |
111 | 111 | ||
112 | sortweight = deepcopy(self.rq.runq_weight) | 112 | sortweight = sorted(deepcopy(self.rq.runq_weight)) |
113 | sortweight.sort() | ||
114 | copyweight = deepcopy(self.rq.runq_weight) | 113 | copyweight = deepcopy(self.rq.runq_weight) |
115 | self.prio_map = [] | 114 | self.prio_map = [] |
116 | 115 | ||
@@ -307,7 +306,7 @@ class RunQueue:
307 | weight[listid] = 1 | 306 | weight[listid] = 1 |
308 | task_done[listid] = True | 307 | task_done[listid] = True |
309 | 308 | ||
310 | while 1: | 309 | while True: |
311 | next_points = [] | 310 | next_points = [] |
312 | for listid in endpoints: | 311 | for listid in endpoints: |
313 | for revdep in self.runq_depends[listid]: | 312 | for revdep in self.runq_depends[listid]: |
@@ -631,7 +630,7 @@ class RunQueue:
631 | for dep in revdeps: | 630 | for dep in revdeps: |
632 | if dep in self.runq_depends[listid]: | 631 | if dep in self.runq_depends[listid]: |
633 | #self.dump_data(taskData) | 632 | #self.dump_data(taskData) |
634 | bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s (%s) has circular dependency on %s (%s)" % (taskData.fn_index[self.runq_fnid[dep]], self.runq_task[dep] , taskData.fn_index[self.runq_fnid[listid]], self.runq_task[listid])) | 633 | bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s (%s) has circular dependency on %s (%s)" % (taskData.fn_index[self.runq_fnid[dep]], self.runq_task[dep], taskData.fn_index[self.runq_fnid[listid]], self.runq_task[listid])) |
635 | 634 | ||
636 | bb.msg.note(2, bb.msg.domain.RunQueue, "Compute totals (have %s endpoint(s))" % len(endpoints)) | 635 | bb.msg.note(2, bb.msg.domain.RunQueue, "Compute totals (have %s endpoint(s))" % len(endpoints)) |
637 | 636 | ||
@@ -814,7 +813,7 @@ class RunQueue:
814 | bb.msg.debug(2, bb.msg.domain.RunQueue, "Stampfile %s < %s" % (stampfile, stampfile2)) | 813 | bb.msg.debug(2, bb.msg.domain.RunQueue, "Stampfile %s < %s" % (stampfile, stampfile2)) |
815 | iscurrent = False | 814 | iscurrent = False |
816 | except: | 815 | except: |
817 | bb.msg.debug(2, bb.msg.domain.RunQueue, "Exception reading %s for %s" % (stampfile2 , stampfile)) | 816 | bb.msg.debug(2, bb.msg.domain.RunQueue, "Exception reading %s for %s" % (stampfile2, stampfile)) |
818 | iscurrent = False | 817 | iscurrent = False |
819 | 818 | ||
820 | return iscurrent | 819 | return iscurrent |
@@ -948,7 +947,7 @@ class RunQueue:
948 | try: | 947 | try: |
949 | pipein, pipeout = os.pipe() | 948 | pipein, pipeout = os.pipe() |
950 | pid = os.fork() | 949 | pid = os.fork() |
951 | except OSError, e: | 950 | except OSError as e: |
952 | bb.msg.fatal(bb.msg.domain.RunQueue, "fork failed: %d (%s)" % (e.errno, e.strerror)) | 951 | bb.msg.fatal(bb.msg.domain.RunQueue, "fork failed: %d (%s)" % (e.errno, e.strerror)) |
953 | if pid == 0: | 952 | if pid == 0: |
954 | os.close(pipein) | 953 | os.close(pipein) |
diff --git a/bitbake/lib/bb/server/none.py b/bitbake/lib/bb/server/none.py
index d4b7fdeea6..e28aa8d7d7 100644
--- a/bitbake/lib/bb/server/none.py
+++ b/bitbake/lib/bb/server/none.py
@@ -115,7 +115,7 @@ class BitBakeServer():
115 | 115 | ||
116 | def register_idle_function(self, function, data): | 116 | def register_idle_function(self, function, data): |
117 | """Register a function to be called while the server is idle""" | 117 | """Register a function to be called while the server is idle""" |
118 | assert callable(function) | 118 | assert hasattr(function, '__call__') |
119 | self._idlefuns[function] = data | 119 | self._idlefuns[function] = data |
120 | 120 | ||
121 | def idle_commands(self, delay): | 121 | def idle_commands(self, delay): |
diff --git a/bitbake/lib/bb/server/xmlrpc.py b/bitbake/lib/bb/server/xmlrpc.py
index 3844a1e33e..cb2949fb9f 100644
--- a/bitbake/lib/bb/server/xmlrpc.py
+++ b/bitbake/lib/bb/server/xmlrpc.py
@@ -112,7 +112,7 @@ class BitBakeServer(SimpleXMLRPCServer):
112 | 112 | ||
113 | def register_idle_function(self, function, data): | 113 | def register_idle_function(self, function, data): |
114 | """Register a function to be called while the server is idle""" | 114 | """Register a function to be called while the server is idle""" |
115 | assert callable(function) | 115 | assert hasattr(function, '__call__') |
116 | self._idlefuns[function] = data | 116 | self._idlefuns[function] = data |
117 | 117 | ||
118 | def serve_forever(self): | 118 | def serve_forever(self): |
diff --git a/bitbake/lib/bb/taskdata.py b/bitbake/lib/bb/taskdata.py
index 58e0d9d8f2..d4fd1498b6 100644
--- a/bitbake/lib/bb/taskdata.py
+++ b/bitbake/lib/bb/taskdata.py
@@ -34,7 +34,7 @@ def re_match_strings(target, strings):
34 | 34 | ||
35 | for name in strings: | 35 | for name in strings: |
36 | if (name==target or | 36 | if (name==target or |
37 | re.search(name,target)!=None): | 37 | re.search(name, target)!=None): |
38 | return True | 38 | return True |
39 | return False | 39 | return False |
40 | 40 | ||
@@ -539,7 +539,7 @@ class TaskData:
539 | Resolve all unresolved build and runtime targets | 539 | Resolve all unresolved build and runtime targets |
540 | """ | 540 | """ |
541 | bb.msg.note(1, bb.msg.domain.TaskData, "Resolving any missing task queue dependencies") | 541 | bb.msg.note(1, bb.msg.domain.TaskData, "Resolving any missing task queue dependencies") |
542 | while 1: | 542 | while True: |
543 | added = 0 | 543 | added = 0 |
544 | for target in self.get_unresolved_build_targets(dataCache): | 544 | for target in self.get_unresolved_build_targets(dataCache): |
545 | try: | 545 | try: |
diff --git a/bitbake/lib/bb/ui/crumbs/buildmanager.py b/bitbake/lib/bb/ui/crumbs/buildmanager.py
index 37a62f189f..b5a4dae0de 100644
--- a/bitbake/lib/bb/ui/crumbs/buildmanager.py
+++ b/bitbake/lib/bb/ui/crumbs/buildmanager.py
@@ -157,7 +157,7 @@ class BuildResult(gobject.GObject):
157 | # format build-<year><month><day>-<ordinal> we can easily | 157 | # format build-<year><month><day>-<ordinal> we can easily |
158 | # pull it out. | 158 | # pull it out. |
159 | # TODO: Better to stat a file? | 159 | # TODO: Better to stat a file? |
160 | (_ , date, revision) = identifier.split ("-") | 160 | (_, date, revision) = identifier.split ("-") |
161 | print(date) | 161 | print(date) |
162 | 162 | ||
163 | year = int (date[0:4]) | 163 | year = int (date[0:4]) |
@@ -385,7 +385,7 @@ class BuildManager (gobject.GObject):
385 | build_directory]) | 385 | build_directory]) |
386 | server.runCommand(["buildTargets", [conf.image], "rootfs"]) | 386 | server.runCommand(["buildTargets", [conf.image], "rootfs"]) |
387 | 387 | ||
388 | except Exception, e: | 388 | except Exception as e: |
389 | print(e) | 389 | print(e) |
390 | 390 | ||
391 | class BuildManagerTreeView (gtk.TreeView): | 391 | class BuildManagerTreeView (gtk.TreeView): |
diff --git a/bitbake/lib/bb/ui/crumbs/runningbuild.py b/bitbake/lib/bb/ui/crumbs/runningbuild.py
index 79e2c9060d..b4416ecbb3 100644
--- a/bitbake/lib/bb/ui/crumbs/runningbuild.py
+++ b/bitbake/lib/bb/ui/crumbs/runningbuild.py
@@ -63,7 +63,7 @@ class RunningBuild (gobject.GObject):
63 | # for the message. | 63 | # for the message. |
64 | if hasattr(event, 'pid'): | 64 | if hasattr(event, 'pid'): |
65 | pid = event.pid | 65 | pid = event.pid |
66 | if self.pids_to_task.has_key(pid): | 66 | if pid in self.pids_to_task: |
67 | (package, task) = self.pids_to_task[pid] | 67 | (package, task) = self.pids_to_task[pid] |
68 | parent = self.tasks_to_iter[(package, task)] | 68 | parent = self.tasks_to_iter[(package, task)] |
69 | 69 | ||
@@ -93,12 +93,12 @@ class RunningBuild (gobject.GObject):
93 | (package, task) = (event._package, event._task) | 93 | (package, task) = (event._package, event._task) |
94 | 94 | ||
95 | # Save out this PID. | 95 | # Save out this PID. |
96 | self.pids_to_task[pid] = (package,task) | 96 | self.pids_to_task[pid] = (package, task) |
97 | 97 | ||
98 | # Check if we already have this package in our model. If so then | 98 | # Check if we already have this package in our model. If so then |
99 | # that can be the parent for the task. Otherwise we create a new | 99 | # that can be the parent for the task. Otherwise we create a new |
100 | # top level for the package. | 100 | # top level for the package. |
101 | if (self.tasks_to_iter.has_key ((package, None))): | 101 | if ((package, None) in self.tasks_to_iter): |
102 | parent = self.tasks_to_iter[(package, None)] | 102 | parent = self.tasks_to_iter[(package, None)] |
103 | else: | 103 | else: |
104 | parent = self.model.append (None, (None, | 104 | parent = self.model.append (None, (None, |
diff --git a/bitbake/lib/bb/ui/depexp.py b/bitbake/lib/bb/ui/depexp.py
index e386e34958..1cd58cac18 100644
--- a/bitbake/lib/bb/ui/depexp.py
+++ b/bitbake/lib/bb/ui/depexp.py
@@ -207,7 +207,7 @@ def init(server, eventHandler):
207 | if ret != True: | 207 | if ret != True: |
208 | print("Couldn't run command! %s" % ret) | 208 | print("Couldn't run command! %s" % ret) |
209 | return | 209 | return |
210 | except xmlrpclib.Fault, x: | 210 | except xmlrpclib.Fault as x: |
211 | print("XMLRPC Fault getting commandline:\n %s" % x) | 211 | print("XMLRPC Fault getting commandline:\n %s" % x) |
212 | return | 212 | return |
213 | 213 | ||
diff --git a/bitbake/lib/bb/ui/goggle.py b/bitbake/lib/bb/ui/goggle.py
index 7a3427f715..2cfa002f8a 100644
--- a/bitbake/lib/bb/ui/goggle.py
+++ b/bitbake/lib/bb/ui/goggle.py
@@ -62,7 +62,7 @@ def init (server, eventHandler):
62 | if ret != True: | 62 | if ret != True: |
63 | print("Couldn't get default commandline! %s" % ret) | 63 | print("Couldn't get default commandline! %s" % ret) |
64 | return 1 | 64 | return 1 |
65 | except xmlrpclib.Fault, x: | 65 | except xmlrpclib.Fault as x: |
66 | print("XMLRPC Fault getting commandline:\n %s" % x) | 66 | print("XMLRPC Fault getting commandline:\n %s" % x) |
67 | return 1 | 67 | return 1 |
68 | 68 | ||
diff --git a/bitbake/lib/bb/ui/knotty.py b/bitbake/lib/bb/ui/knotty.py
index dba9530ef6..b6ca15b4fb 100644
--- a/bitbake/lib/bb/ui/knotty.py
+++ b/bitbake/lib/bb/ui/knotty.py
@@ -46,7 +46,7 @@ def init(server, eventHandler):
46 | if ret != True: | 46 | if ret != True: |
47 | print("Couldn't get default commandline! %s" % ret) | 47 | print("Couldn't get default commandline! %s" % ret) |
48 | return 1 | 48 | return 1 |
49 | except xmlrpclib.Fault, x: | 49 | except xmlrpclib.Fault as x: |
50 | print("XMLRPC Fault getting commandline:\n %s" % x) | 50 | print("XMLRPC Fault getting commandline:\n %s" % x) |
51 | return 1 | 51 | return 1 |
52 | 52 | ||
diff --git a/bitbake/lib/bb/ui/ncurses.py b/bitbake/lib/bb/ui/ncurses.py
index 89e67900b2..e3bca2af83 100644
--- a/bitbake/lib/bb/ui/ncurses.py
+++ b/bitbake/lib/bb/ui/ncurses.py
@@ -234,7 +234,7 @@ class NCursesUI:
234 | if ret != True: | 234 | if ret != True: |
235 | print("Couldn't get default commandlind! %s" % ret) | 235 | print("Couldn't get default commandlind! %s" % ret) |
236 | return | 236 | return |
237 | except xmlrpclib.Fault, x: | 237 | except xmlrpclib.Fault as x: |
238 | print("XMLRPC Fault getting commandline:\n %s" % x) | 238 | print("XMLRPC Fault getting commandline:\n %s" % x) |
239 | return | 239 | return |
240 | 240 | ||
diff --git a/bitbake/lib/bb/ui/puccho.py b/bitbake/lib/bb/ui/puccho.py
index 7dffa5c3ba..2ac025303e 100644
--- a/bitbake/lib/bb/ui/puccho.py
+++ b/bitbake/lib/bb/ui/puccho.py
@@ -104,10 +104,10 @@ class MetaDataLoader(gobject.GObject):
104 | gobject.idle_add (MetaDataLoader.emit_success_signal, | 104 | gobject.idle_add (MetaDataLoader.emit_success_signal, |
105 | self.loader) | 105 | self.loader) |
106 | 106 | ||
107 | except MetaDataLoader.LoaderThread.LoaderImportException, e: | 107 | except MetaDataLoader.LoaderThread.LoaderImportException as e: |
108 | gobject.idle_add (MetaDataLoader.emit_error_signal, self.loader, | 108 | gobject.idle_add (MetaDataLoader.emit_error_signal, self.loader, |
109 | "Repository metadata corrupt") | 109 | "Repository metadata corrupt") |
110 | except Exception, e: | 110 | except Exception as e: |
111 | gobject.idle_add (MetaDataLoader.emit_error_signal, self.loader, | 111 | gobject.idle_add (MetaDataLoader.emit_error_signal, self.loader, |
112 | "Unable to download repository metadata") | 112 | "Unable to download repository metadata") |
113 | print(e) | 113 | print(e) |
@@ -211,7 +211,7 @@ class BuildSetupDialog (gtk.Dialog):
211 | # Build | 211 | # Build |
212 | button = gtk.Button ("_Build", None, True) | 212 | button = gtk.Button ("_Build", None, True) |
213 | image = gtk.Image () | 213 | image = gtk.Image () |
214 | image.set_from_stock (gtk.STOCK_EXECUTE,gtk.ICON_SIZE_BUTTON) | 214 | image.set_from_stock (gtk.STOCK_EXECUTE, gtk.ICON_SIZE_BUTTON) |
215 | button.set_image (image) | 215 | button.set_image (image) |
216 | self.add_action_widget (button, BuildSetupDialog.RESPONSE_BUILD) | 216 | self.add_action_widget (button, BuildSetupDialog.RESPONSE_BUILD) |
217 | button.show_all () | 217 | button.show_all () |
diff --git a/bitbake/lib/bb/ui/uievent.py b/bitbake/lib/bb/ui/uievent.py
index 5b3efffcba..f1e4d791ee 100644
--- a/bitbake/lib/bb/ui/uievent.py
+++ b/bitbake/lib/bb/ui/uievent.py
@@ -110,7 +110,7 @@ class UIXMLRPCServer (SimpleXMLRPCServer):
110 | return (sock, addr) | 110 | return (sock, addr) |
111 | except socket.timeout: | 111 | except socket.timeout: |
112 | pass | 112 | pass |
113 | return (None,None) | 113 | return (None, None) |
114 | 114 | ||
115 | def close_request(self, request): | 115 | def close_request(self, request): |
116 | if request is None: | 116 | if request is None: |
diff --git a/bitbake/lib/bb/utils.py b/bitbake/lib/bb/utils.py
index 7446be875d..02668b16c4 100644
--- a/bitbake/lib/bb/utils.py
+++ b/bitbake/lib/bb/utils.py
@@ -72,9 +72,9 @@ def vercmp_part(a, b):
72 | if ca == None and cb == None: | 72 | if ca == None and cb == None: |
73 | return 0 | 73 | return 0 |
74 | 74 | ||
75 | if type(ca) is types.StringType: | 75 | if isinstance(ca, types.StringType): |
76 | sa = ca in separators | 76 | sa = ca in separators |
77 | if type(cb) is types.StringType: | 77 | if isinstance(cb, types.StringType): |
78 | sb = cb in separators | 78 | sb = cb in separators |
79 | if sa and not sb: | 79 | if sa and not sb: |
80 | return -1 | 80 | return -1 |
@@ -306,7 +306,7 @@ def better_compile(text, file, realfile, mode = "exec"):
306 | """ | 306 | """ |
307 | try: | 307 | try: |
308 | return compile(text, file, mode) | 308 | return compile(text, file, mode) |
309 | except Exception, e: | 309 | except Exception as e: |
310 | # split the text into lines again | 310 | # split the text into lines again |
311 | body = text.split('\n') | 311 | body = text.split('\n') |
312 | bb.msg.error(bb.msg.domain.Util, "Error in compiling python function in: ", realfile) | 312 | bb.msg.error(bb.msg.domain.Util, "Error in compiling python function in: ", realfile) |
@@ -385,7 +385,7 @@ def lockfile(name):
385 | return lf | 385 | return lf |
386 | # File no longer exists or changed, retry | 386 | # File no longer exists or changed, retry |
387 | lf.close | 387 | lf.close |
388 | except Exception, e: | 388 | except Exception as e: |
389 | continue | 389 | continue |
390 | 390 | ||
391 | def unlockfile(lf): | 391 | def unlockfile(lf): |
@@ -546,7 +546,7 @@ def mkdirhier(dir):
546 | try: | 546 | try: |
547 | os.makedirs(dir) | 547 | os.makedirs(dir) |
548 | bb.msg.debug(2, bb.msg.domain.Util, "created " + dir) | 548 | bb.msg.debug(2, bb.msg.domain.Util, "created " + dir) |
549 | except OSError, e: | 549 | except OSError as e: |
550 | if e.errno != errno.EEXIST: | 550 | if e.errno != errno.EEXIST: |
551 | raise e | 551 | raise e |
552 | 552 | ||
@@ -561,7 +561,7 @@ def movefile(src, dest, newmtime = None, sstat = None):
561 | try: | 561 | try: |
562 | if not sstat: | 562 | if not sstat: |
563 | sstat = os.lstat(src) | 563 | sstat = os.lstat(src) |
564 | except Exception, e: | 564 | except Exception as e: |
565 | print("movefile: Stating source file failed...", e) | 565 | print("movefile: Stating source file failed...", e) |
566 | return None | 566 | return None |
567 | 567 | ||
@@ -577,7 +577,7 @@ def movefile(src, dest, newmtime = None, sstat = None):
577 | try: | 577 | try: |
578 | os.unlink(dest) | 578 | os.unlink(dest) |
579 | destexists = 0 | 579 | destexists = 0 |
580 | except Exception, e: | 580 | except Exception as e: |
581 | pass | 581 | pass |
582 | 582 | ||
583 | if stat.S_ISLNK(sstat[stat.ST_MODE]): | 583 | if stat.S_ISLNK(sstat[stat.ST_MODE]): |
@@ -589,7 +589,7 @@ def movefile(src, dest, newmtime = None, sstat = None):
589 | #os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID]) | 589 | #os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID]) |
590 | os.unlink(src) | 590 | os.unlink(src) |
591 | return os.lstat(dest) | 591 | return os.lstat(dest) |
592 | except Exception, e: | 592 | except Exception as e: |
593 | print("movefile: failed to properly create symlink:", dest, "->", target, e) | 593 | print("movefile: failed to properly create symlink:", dest, "->", target, e) |
594 | return None | 594 | return None |
595 | 595 | ||
@@ -598,7 +598,7 @@ def movefile(src, dest, newmtime = None, sstat = None):
598 | try: | 598 | try: |
599 | os.rename(src, dest) | 599 | os.rename(src, dest) |
600 | renamefailed = 0 | 600 | renamefailed = 0 |
601 | except Exception, e: | 601 | except Exception as e: |
602 | if e[0] != errno.EXDEV: | 602 | if e[0] != errno.EXDEV: |
603 | # Some random error. | 603 | # Some random error. |
604 | print("movefile: Failed to move", src, "to", dest, e) | 604 | print("movefile: Failed to move", src, "to", dest, e) |
@@ -612,7 +612,7 @@ def movefile(src, dest, newmtime = None, sstat = None):
612 | shutil.copyfile(src, dest + "#new") | 612 | shutil.copyfile(src, dest + "#new") |
613 | os.rename(dest + "#new", dest) | 613 | os.rename(dest + "#new", dest) |
614 | didcopy = 1 | 614 | didcopy = 1 |
615 | except Exception, e: | 615 | except Exception as e: |
616 | print('movefile: copy', src, '->', dest, 'failed.', e) | 616 | print('movefile: copy', src, '->', dest, 'failed.', e) |
617 | return None | 617 | return None |
618 | else: | 618 | else: |
@@ -626,7 +626,7 @@ def movefile(src, dest, newmtime = None, sstat = None):
626 | os.lchown(dest, sstat[stat.ST_UID], sstat[stat.ST_GID]) | 626 | os.lchown(dest, sstat[stat.ST_UID], sstat[stat.ST_GID]) |
627 | os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown | 627 | os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown |
628 | os.unlink(src) | 628 | os.unlink(src) |
629 | except Exception, e: | 629 | except Exception as e: |
630 | print("movefile: Failed to chown/chmod/unlink", dest, e) | 630 | print("movefile: Failed to chown/chmod/unlink", dest, e) |
631 | return None | 631 | return None |
632 | 632 | ||
@@ -647,7 +647,7 @@ def copyfile(src, dest, newmtime = None, sstat = None):
647 | try: | 647 | try: |
648 | if not sstat: | 648 | if not sstat: |
649 | sstat = os.lstat(src) | 649 | sstat = os.lstat(src) |
650 | except Exception, e: | 650 | except Exception as e: |
651 | print("copyfile: Stating source file failed...", e) | 651 | print("copyfile: Stating source file failed...", e) |
652 | return False | 652 | return False |
653 | 653 | ||
@@ -663,7 +663,7 @@ def copyfile(src, dest, newmtime = None, sstat = None):
663 | try: | 663 | try: |
664 | os.unlink(dest) | 664 | os.unlink(dest) |
665 | destexists = 0 | 665 | destexists = 0 |
666 | except Exception, e: | 666 | except Exception as e: |
667 | pass | 667 | pass |
668 | 668 | ||
669 | if stat.S_ISLNK(sstat[stat.ST_MODE]): | 669 | if stat.S_ISLNK(sstat[stat.ST_MODE]): |
@@ -674,7 +674,7 @@ def copyfile(src, dest, newmtime = None, sstat = None):
674 | os.symlink(target, dest) | 674 | os.symlink(target, dest) |
675 | #os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID]) | 675 | #os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID]) |
676 | return os.lstat(dest) | 676 | return os.lstat(dest) |
677 | except Exception, e: | 677 | except Exception as e: |
678 | print("copyfile: failed to properly create symlink:", dest, "->", target, e) | 678 | print("copyfile: failed to properly create symlink:", dest, "->", target, e) |
679 | return False | 679 | return False |
680 | 680 | ||
@@ -682,7 +682,7 @@ def copyfile(src, dest, newmtime = None, sstat = None):
682 | try: # For safety copy then move it over. | 682 | try: # For safety copy then move it over. |
683 | shutil.copyfile(src, dest + "#new") | 683 | shutil.copyfile(src, dest + "#new") |
684 | os.rename(dest + "#new", dest) | 684 | os.rename(dest + "#new", dest) |
685 | except Exception, e: | 685 | except Exception as e: |
686 | print('copyfile: copy', src, '->', dest, 'failed.', e) | 686 | print('copyfile: copy', src, '->', dest, 'failed.', e) |
687 | return False | 687 | return False |
688 | else: | 688 | else: |
@@ -694,7 +694,7 @@ def copyfile(src, dest, newmtime = None, sstat = None):
694 | try: | 694 | try: |
695 | os.lchown(dest, sstat[stat.ST_UID], sstat[stat.ST_GID]) | 695 | os.lchown(dest, sstat[stat.ST_UID], sstat[stat.ST_GID]) |
696 | os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown | 696 | os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown |
697 | except Exception, e: | 697 | except Exception as e: |
698 | print("copyfile: Failed to chown/chmod/unlink", dest, e) | 698 | print("copyfile: Failed to chown/chmod/unlink", dest, e) |
699 | return False | 699 | return False |
700 | 700 | ||