diff options
Diffstat (limited to 'bitbake-dev/lib')
50 files changed, 0 insertions, 15020 deletions
diff --git a/bitbake-dev/lib/bb/COW.py b/bitbake-dev/lib/bb/COW.py deleted file mode 100644 index ca206cf4b4..0000000000 --- a/bitbake-dev/lib/bb/COW.py +++ /dev/null | |||
@@ -1,318 +0,0 @@ | |||
1 | # ex:ts=4:sw=4:sts=4:et | ||
2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
3 | # | ||
4 | # This is a copy on write dictionary and set which abuses classes to try and be nice and fast. | ||
5 | # | ||
6 | # Copyright (C) 2006 Tim Amsell | ||
7 | # | ||
8 | # This program is free software; you can redistribute it and/or modify | ||
9 | # it under the terms of the GNU General Public License version 2 as | ||
10 | # published by the Free Software Foundation. | ||
11 | # | ||
12 | # This program is distributed in the hope that it will be useful, | ||
13 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | # GNU General Public License for more details. | ||
16 | # | ||
17 | # You should have received a copy of the GNU General Public License along | ||
18 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
19 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
20 | # | ||
21 | #Please Note: | ||
22 | # Be careful when using mutable types (ie Dict and Lists) - operations involving these are SLOW. | ||
23 | # Assign a file to __warn__ to get warnings about slow operations. | ||
24 | # | ||
25 | |||
26 | import copy | ||
27 | import types | ||
28 | types.ImmutableTypes = tuple([ \ | ||
29 | types.BooleanType, \ | ||
30 | types.ComplexType, \ | ||
31 | types.FloatType, \ | ||
32 | types.IntType, \ | ||
33 | types.LongType, \ | ||
34 | types.NoneType, \ | ||
35 | types.TupleType, \ | ||
36 | frozenset] + \ | ||
37 | list(types.StringTypes)) | ||
38 | |||
39 | MUTABLE = "__mutable__" | ||
40 | |||
41 | class COWMeta(type): | ||
42 | pass | ||
43 | |||
44 | class COWDictMeta(COWMeta): | ||
45 | __warn__ = False | ||
46 | __hasmutable__ = False | ||
47 | __marker__ = tuple() | ||
48 | |||
49 | def __str__(cls): | ||
50 | # FIXME: I have magic numbers! | ||
51 | return "<COWDict Level: %i Current Keys: %i>" % (cls.__count__, len(cls.__dict__) - 3) | ||
52 | __repr__ = __str__ | ||
53 | |||
54 | def cow(cls): | ||
55 | class C(cls): | ||
56 | __count__ = cls.__count__ + 1 | ||
57 | return C | ||
58 | copy = cow | ||
59 | __call__ = cow | ||
60 | |||
61 | def __setitem__(cls, key, value): | ||
62 | if not isinstance(value, types.ImmutableTypes): | ||
63 | if not isinstance(value, COWMeta): | ||
64 | cls.__hasmutable__ = True | ||
65 | key += MUTABLE | ||
66 | setattr(cls, key, value) | ||
67 | |||
68 | def __getmutable__(cls, key, readonly=False): | ||
69 | nkey = key + MUTABLE | ||
70 | try: | ||
71 | return cls.__dict__[nkey] | ||
72 | except KeyError: | ||
73 | pass | ||
74 | |||
75 | value = getattr(cls, nkey) | ||
76 | if readonly: | ||
77 | return value | ||
78 | |||
79 | if not cls.__warn__ is False and not isinstance(value, COWMeta): | ||
80 | print >> cls.__warn__, "Warning: Doing a copy because %s is a mutable type." % key | ||
81 | try: | ||
82 | value = value.copy() | ||
83 | except AttributeError, e: | ||
84 | value = copy.copy(value) | ||
85 | setattr(cls, nkey, value) | ||
86 | return value | ||
87 | |||
88 | __getmarker__ = [] | ||
89 | def __getreadonly__(cls, key, default=__getmarker__): | ||
90 | """\ | ||
91 | Get a value (even if mutable) which you promise not to change. | ||
92 | """ | ||
93 | return cls.__getitem__(key, default, True) | ||
94 | |||
95 | def __getitem__(cls, key, default=__getmarker__, readonly=False): | ||
96 | try: | ||
97 | try: | ||
98 | value = getattr(cls, key) | ||
99 | except AttributeError: | ||
100 | value = cls.__getmutable__(key, readonly) | ||
101 | |||
102 | # This is for values which have been deleted | ||
103 | if value is cls.__marker__: | ||
104 | raise AttributeError("key %s does not exist." % key) | ||
105 | |||
106 | return value | ||
107 | except AttributeError, e: | ||
108 | if not default is cls.__getmarker__: | ||
109 | return default | ||
110 | |||
111 | raise KeyError(str(e)) | ||
112 | |||
113 | def __delitem__(cls, key): | ||
114 | cls.__setitem__(key, cls.__marker__) | ||
115 | |||
116 | def __revertitem__(cls, key): | ||
117 | if not cls.__dict__.has_key(key): | ||
118 | key += MUTABLE | ||
119 | delattr(cls, key) | ||
120 | |||
121 | def has_key(cls, key): | ||
122 | value = cls.__getreadonly__(key, cls.__marker__) | ||
123 | if value is cls.__marker__: | ||
124 | return False | ||
125 | return True | ||
126 | |||
127 | def iter(cls, type, readonly=False): | ||
128 | for key in dir(cls): | ||
129 | if key.startswith("__"): | ||
130 | continue | ||
131 | |||
132 | if key.endswith(MUTABLE): | ||
133 | key = key[:-len(MUTABLE)] | ||
134 | |||
135 | if type == "keys": | ||
136 | yield key | ||
137 | |||
138 | try: | ||
139 | if readonly: | ||
140 | value = cls.__getreadonly__(key) | ||
141 | else: | ||
142 | value = cls[key] | ||
143 | except KeyError: | ||
144 | continue | ||
145 | |||
146 | if type == "values": | ||
147 | yield value | ||
148 | if type == "items": | ||
149 | yield (key, value) | ||
150 | raise StopIteration() | ||
151 | |||
152 | def iterkeys(cls): | ||
153 | return cls.iter("keys") | ||
154 | def itervalues(cls, readonly=False): | ||
155 | if not cls.__warn__ is False and cls.__hasmutable__ and readonly is False: | ||
156 | print >> cls.__warn__, "Warning: If you arn't going to change any of the values call with True." | ||
157 | return cls.iter("values", readonly) | ||
158 | def iteritems(cls, readonly=False): | ||
159 | if not cls.__warn__ is False and cls.__hasmutable__ and readonly is False: | ||
160 | print >> cls.__warn__, "Warning: If you arn't going to change any of the values call with True." | ||
161 | return cls.iter("items", readonly) | ||
162 | |||
163 | class COWSetMeta(COWDictMeta): | ||
164 | def __str__(cls): | ||
165 | # FIXME: I have magic numbers! | ||
166 | return "<COWSet Level: %i Current Keys: %i>" % (cls.__count__, len(cls.__dict__) -3) | ||
167 | __repr__ = __str__ | ||
168 | |||
169 | def cow(cls): | ||
170 | class C(cls): | ||
171 | __count__ = cls.__count__ + 1 | ||
172 | return C | ||
173 | |||
174 | def add(cls, value): | ||
175 | COWDictMeta.__setitem__(cls, repr(hash(value)), value) | ||
176 | |||
177 | def remove(cls, value): | ||
178 | COWDictMeta.__delitem__(cls, repr(hash(value))) | ||
179 | |||
180 | def __in__(cls, value): | ||
181 | return COWDictMeta.has_key(repr(hash(value))) | ||
182 | |||
183 | def iterkeys(cls): | ||
184 | raise TypeError("sets don't have keys") | ||
185 | |||
186 | def iteritems(cls): | ||
187 | raise TypeError("sets don't have 'items'") | ||
188 | |||
189 | # These are the actual classes you use! | ||
190 | class COWDictBase(object): | ||
191 | __metaclass__ = COWDictMeta | ||
192 | __count__ = 0 | ||
193 | |||
194 | class COWSetBase(object): | ||
195 | __metaclass__ = COWSetMeta | ||
196 | __count__ = 0 | ||
197 | |||
198 | if __name__ == "__main__": | ||
199 | import sys | ||
200 | COWDictBase.__warn__ = sys.stderr | ||
201 | a = COWDictBase() | ||
202 | print "a", a | ||
203 | |||
204 | a['a'] = 'a' | ||
205 | a['b'] = 'b' | ||
206 | a['dict'] = {} | ||
207 | |||
208 | b = a.copy() | ||
209 | print "b", b | ||
210 | b['c'] = 'b' | ||
211 | |||
212 | |||
213 | |||
214 | print "a", a | ||
215 | for x in a.iteritems(): | ||
216 | print x | ||
217 | print "--" | ||
218 | print "b", b | ||
219 | for x in b.iteritems(): | ||
220 | print x | ||
221 | |||
222 | |||
223 | b['dict']['a'] = 'b' | ||
224 | b['a'] = 'c' | ||
225 | |||
226 | print "a", a | ||
227 | for x in a.iteritems(): | ||
228 | print x | ||
229 | print "--" | ||
230 | print "b", b | ||
231 | for x in b.iteritems(): | ||
232 | print x | ||
233 | |||
234 | |||
235 | try: | ||
236 | b['dict2'] | ||
237 | except KeyError, e: | ||
238 | print "Okay!" | ||
239 | |||
240 | a['set'] = COWSetBase() | ||
241 | a['set'].add("o1") | ||
242 | a['set'].add("o1") | ||
243 | a['set'].add("o2") | ||
244 | |||
245 | print "a", a | ||
246 | for x in a['set'].itervalues(): | ||
247 | print x | ||
248 | print "--" | ||
249 | print "b", b | ||
250 | for x in b['set'].itervalues(): | ||
251 | print x | ||
252 | |||
253 | |||
254 | b['set'].add('o3') | ||
255 | |||
256 | print "a", a | ||
257 | for x in a['set'].itervalues(): | ||
258 | print x | ||
259 | print "--" | ||
260 | print "b", b | ||
261 | for x in b['set'].itervalues(): | ||
262 | print x | ||
263 | |||
264 | |||
265 | a['set2'] = set() | ||
266 | a['set2'].add("o1") | ||
267 | a['set2'].add("o1") | ||
268 | a['set2'].add("o2") | ||
269 | |||
270 | print "a", a | ||
271 | for x in a.iteritems(): | ||
272 | print x | ||
273 | print "--" | ||
274 | print "b", b | ||
275 | for x in b.iteritems(readonly=True): | ||
276 | print x | ||
277 | |||
278 | |||
279 | del b['b'] | ||
280 | try: | ||
281 | print b['b'] | ||
282 | except KeyError: | ||
283 | print "Yay! deleted key raises error" | ||
284 | |||
285 | if b.has_key('b'): | ||
286 | print "Boo!" | ||
287 | else: | ||
288 | print "Yay - has_key with delete works!" | ||
289 | |||
290 | print "a", a | ||
291 | for x in a.iteritems(): | ||
292 | print x | ||
293 | print "--" | ||
294 | print "b", b | ||
295 | for x in b.iteritems(readonly=True): | ||
296 | print x | ||
297 | |||
298 | |||
299 | b.__revertitem__('b') | ||
300 | |||
301 | print "a", a | ||
302 | for x in a.iteritems(): | ||
303 | print x | ||
304 | print "--" | ||
305 | print "b", b | ||
306 | for x in b.iteritems(readonly=True): | ||
307 | print x | ||
308 | |||
309 | |||
310 | b.__revertitem__('dict') | ||
311 | print "a", a | ||
312 | for x in a.iteritems(): | ||
313 | print x | ||
314 | print "--" | ||
315 | print "b", b | ||
316 | for x in b.iteritems(readonly=True): | ||
317 | print x | ||
318 | |||
diff --git a/bitbake-dev/lib/bb/__init__.py b/bitbake-dev/lib/bb/__init__.py deleted file mode 100644 index f2f8f656d8..0000000000 --- a/bitbake-dev/lib/bb/__init__.py +++ /dev/null | |||
@@ -1,1134 +0,0 @@ | |||
1 | # ex:ts=4:sw=4:sts=4:et | ||
2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
3 | # | ||
4 | # BitBake Build System Python Library | ||
5 | # | ||
6 | # Copyright (C) 2003 Holger Schurig | ||
7 | # Copyright (C) 2003, 2004 Chris Larson | ||
8 | # | ||
9 | # Based on Gentoo's portage.py. | ||
10 | # | ||
11 | # This program is free software; you can redistribute it and/or modify | ||
12 | # it under the terms of the GNU General Public License version 2 as | ||
13 | # published by the Free Software Foundation. | ||
14 | # | ||
15 | # This program is distributed in the hope that it will be useful, | ||
16 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
18 | # GNU General Public License for more details. | ||
19 | # | ||
20 | # You should have received a copy of the GNU General Public License along | ||
21 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
22 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
23 | |||
24 | __version__ = "1.9.0" | ||
25 | |||
26 | __all__ = [ | ||
27 | |||
28 | "debug", | ||
29 | "note", | ||
30 | "error", | ||
31 | "fatal", | ||
32 | |||
33 | "mkdirhier", | ||
34 | "movefile", | ||
35 | |||
36 | "tokenize", | ||
37 | "evaluate", | ||
38 | "flatten", | ||
39 | "relparse", | ||
40 | "ververify", | ||
41 | "isjustname", | ||
42 | "isspecific", | ||
43 | "pkgsplit", | ||
44 | "catpkgsplit", | ||
45 | "vercmp", | ||
46 | "pkgcmp", | ||
47 | "dep_parenreduce", | ||
48 | "dep_opconvert", | ||
49 | |||
50 | # fetch | ||
51 | "decodeurl", | ||
52 | "encodeurl", | ||
53 | |||
54 | # modules | ||
55 | "parse", | ||
56 | "data", | ||
57 | "command", | ||
58 | "event", | ||
59 | "build", | ||
60 | "fetch", | ||
61 | "manifest", | ||
62 | "methodpool", | ||
63 | "cache", | ||
64 | "runqueue", | ||
65 | "taskdata", | ||
66 | "providers", | ||
67 | ] | ||
68 | |||
69 | whitespace = '\t\n\x0b\x0c\r ' | ||
70 | lowercase = 'abcdefghijklmnopqrstuvwxyz' | ||
71 | |||
72 | import sys, os, types, re, string, bb | ||
73 | from bb import msg | ||
74 | |||
75 | #projectdir = os.path.dirname(os.path.dirname(os.path.abspath(sys.argv[0]))) | ||
76 | projectdir = os.getcwd() | ||
77 | |||
78 | if "BBDEBUG" in os.environ: | ||
79 | level = int(os.environ["BBDEBUG"]) | ||
80 | if level: | ||
81 | bb.msg.set_debug_level(level) | ||
82 | |||
83 | class VarExpandError(Exception): | ||
84 | pass | ||
85 | |||
86 | class MalformedUrl(Exception): | ||
87 | """Exception raised when encountering an invalid url""" | ||
88 | |||
89 | |||
90 | ####################################################################### | ||
91 | ####################################################################### | ||
92 | # | ||
93 | # SECTION: Debug | ||
94 | # | ||
95 | # PURPOSE: little functions to make yourself known | ||
96 | # | ||
97 | ####################################################################### | ||
98 | ####################################################################### | ||
99 | |||
100 | def plain(*args): | ||
101 | bb.msg.warn(''.join(args)) | ||
102 | |||
103 | def debug(lvl, *args): | ||
104 | bb.msg.debug(lvl, None, ''.join(args)) | ||
105 | |||
106 | def note(*args): | ||
107 | bb.msg.note(1, None, ''.join(args)) | ||
108 | |||
109 | def warn(*args): | ||
110 | bb.msg.warn(1, None, ''.join(args)) | ||
111 | |||
112 | def error(*args): | ||
113 | bb.msg.error(None, ''.join(args)) | ||
114 | |||
115 | def fatal(*args): | ||
116 | bb.msg.fatal(None, ''.join(args)) | ||
117 | |||
118 | |||
119 | ####################################################################### | ||
120 | ####################################################################### | ||
121 | # | ||
122 | # SECTION: File | ||
123 | # | ||
124 | # PURPOSE: Basic file and directory tree related functions | ||
125 | # | ||
126 | ####################################################################### | ||
127 | ####################################################################### | ||
128 | |||
129 | def mkdirhier(dir): | ||
130 | """Create a directory like 'mkdir -p', but does not complain if | ||
131 | directory already exists like os.makedirs | ||
132 | """ | ||
133 | |||
134 | debug(3, "mkdirhier(%s)" % dir) | ||
135 | try: | ||
136 | os.makedirs(dir) | ||
137 | debug(2, "created " + dir) | ||
138 | except OSError, e: | ||
139 | if e.errno != 17: raise e | ||
140 | |||
141 | |||
142 | ####################################################################### | ||
143 | |||
144 | import stat | ||
145 | |||
146 | def movefile(src,dest,newmtime=None,sstat=None): | ||
147 | """Moves a file from src to dest, preserving all permissions and | ||
148 | attributes; mtime will be preserved even when moving across | ||
149 | filesystems. Returns true on success and false on failure. Move is | ||
150 | atomic. | ||
151 | """ | ||
152 | |||
153 | #print "movefile("+src+","+dest+","+str(newmtime)+","+str(sstat)+")" | ||
154 | try: | ||
155 | if not sstat: | ||
156 | sstat=os.lstat(src) | ||
157 | except Exception, e: | ||
158 | print "movefile: Stating source file failed...", e | ||
159 | return None | ||
160 | |||
161 | destexists=1 | ||
162 | try: | ||
163 | dstat=os.lstat(dest) | ||
164 | except: | ||
165 | dstat=os.lstat(os.path.dirname(dest)) | ||
166 | destexists=0 | ||
167 | |||
168 | if destexists: | ||
169 | if stat.S_ISLNK(dstat[stat.ST_MODE]): | ||
170 | try: | ||
171 | os.unlink(dest) | ||
172 | destexists=0 | ||
173 | except Exception, e: | ||
174 | pass | ||
175 | |||
176 | if stat.S_ISLNK(sstat[stat.ST_MODE]): | ||
177 | try: | ||
178 | target=os.readlink(src) | ||
179 | if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]): | ||
180 | os.unlink(dest) | ||
181 | os.symlink(target,dest) | ||
182 | #os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID]) | ||
183 | os.unlink(src) | ||
184 | return os.lstat(dest) | ||
185 | except Exception, e: | ||
186 | print "movefile: failed to properly create symlink:", dest, "->", target, e | ||
187 | return None | ||
188 | |||
189 | renamefailed=1 | ||
190 | if sstat[stat.ST_DEV]==dstat[stat.ST_DEV]: | ||
191 | try: | ||
192 | ret=os.rename(src,dest) | ||
193 | renamefailed=0 | ||
194 | except Exception, e: | ||
195 | import errno | ||
196 | if e[0]!=errno.EXDEV: | ||
197 | # Some random error. | ||
198 | print "movefile: Failed to move", src, "to", dest, e | ||
199 | return None | ||
200 | # Invalid cross-device-link 'bind' mounted or actually Cross-Device | ||
201 | |||
202 | if renamefailed: | ||
203 | didcopy=0 | ||
204 | if stat.S_ISREG(sstat[stat.ST_MODE]): | ||
205 | try: # For safety copy then move it over. | ||
206 | shutil.copyfile(src,dest+"#new") | ||
207 | os.rename(dest+"#new",dest) | ||
208 | didcopy=1 | ||
209 | except Exception, e: | ||
210 | print 'movefile: copy', src, '->', dest, 'failed.', e | ||
211 | return None | ||
212 | else: | ||
213 | #we don't yet handle special, so we need to fall back to /bin/mv | ||
214 | a=getstatusoutput("/bin/mv -f "+"'"+src+"' '"+dest+"'") | ||
215 | if a[0]!=0: | ||
216 | print "movefile: Failed to move special file:" + src + "' to '" + dest + "'", a | ||
217 | return None # failure | ||
218 | try: | ||
219 | if didcopy: | ||
220 | missingos.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID]) | ||
221 | os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown | ||
222 | os.unlink(src) | ||
223 | except Exception, e: | ||
224 | print "movefile: Failed to chown/chmod/unlink", dest, e | ||
225 | return None | ||
226 | |||
227 | if newmtime: | ||
228 | os.utime(dest,(newmtime,newmtime)) | ||
229 | else: | ||
230 | os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME])) | ||
231 | newmtime=sstat[stat.ST_MTIME] | ||
232 | return newmtime | ||
233 | |||
234 | def copyfile(src,dest,newmtime=None,sstat=None): | ||
235 | """ | ||
236 | Copies a file from src to dest, preserving all permissions and | ||
237 | attributes; mtime will be preserved even when moving across | ||
238 | filesystems. Returns true on success and false on failure. | ||
239 | """ | ||
240 | import os, stat, shutil | ||
241 | |||
242 | #print "copyfile("+src+","+dest+","+str(newmtime)+","+str(sstat)+")" | ||
243 | try: | ||
244 | if not sstat: | ||
245 | sstat=os.lstat(src) | ||
246 | except Exception, e: | ||
247 | print "copyfile: Stating source file failed...", e | ||
248 | return False | ||
249 | |||
250 | destexists=1 | ||
251 | try: | ||
252 | dstat=os.lstat(dest) | ||
253 | except: | ||
254 | dstat=os.lstat(os.path.dirname(dest)) | ||
255 | destexists=0 | ||
256 | |||
257 | if destexists: | ||
258 | if stat.S_ISLNK(dstat[stat.ST_MODE]): | ||
259 | try: | ||
260 | os.unlink(dest) | ||
261 | destexists=0 | ||
262 | except Exception, e: | ||
263 | pass | ||
264 | |||
265 | if stat.S_ISLNK(sstat[stat.ST_MODE]): | ||
266 | try: | ||
267 | target=os.readlink(src) | ||
268 | if destexists and not stat.S_ISDIR(dstat[stat.ST_MODE]): | ||
269 | os.unlink(dest) | ||
270 | os.symlink(target,dest) | ||
271 | #os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID]) | ||
272 | return os.lstat(dest) | ||
273 | except Exception, e: | ||
274 | print "copyfile: failed to properly create symlink:", dest, "->", target, e | ||
275 | return False | ||
276 | |||
277 | if stat.S_ISREG(sstat[stat.ST_MODE]): | ||
278 | try: # For safety copy then move it over. | ||
279 | shutil.copyfile(src,dest+"#new") | ||
280 | os.rename(dest+"#new",dest) | ||
281 | except Exception, e: | ||
282 | print 'copyfile: copy', src, '->', dest, 'failed.', e | ||
283 | return False | ||
284 | else: | ||
285 | #we don't yet handle special, so we need to fall back to /bin/mv | ||
286 | a=getstatusoutput("/bin/cp -f "+"'"+src+"' '"+dest+"'") | ||
287 | if a[0]!=0: | ||
288 | print "copyfile: Failed to copy special file:" + src + "' to '" + dest + "'", a | ||
289 | return False # failure | ||
290 | try: | ||
291 | os.lchown(dest,sstat[stat.ST_UID],sstat[stat.ST_GID]) | ||
292 | os.chmod(dest, stat.S_IMODE(sstat[stat.ST_MODE])) # Sticky is reset on chown | ||
293 | except Exception, e: | ||
294 | print "copyfile: Failed to chown/chmod/unlink", dest, e | ||
295 | return False | ||
296 | |||
297 | if newmtime: | ||
298 | os.utime(dest,(newmtime,newmtime)) | ||
299 | else: | ||
300 | os.utime(dest, (sstat[stat.ST_ATIME], sstat[stat.ST_MTIME])) | ||
301 | newmtime=sstat[stat.ST_MTIME] | ||
302 | return newmtime | ||
303 | |||
304 | ####################################################################### | ||
305 | ####################################################################### | ||
306 | # | ||
307 | # SECTION: Download | ||
308 | # | ||
309 | # PURPOSE: Download via HTTP, FTP, CVS, BITKEEPER, handling of MD5-signatures | ||
310 | # and mirrors | ||
311 | # | ||
312 | ####################################################################### | ||
313 | ####################################################################### | ||
314 | |||
315 | def decodeurl(url): | ||
316 | """Decodes an URL into the tokens (scheme, network location, path, | ||
317 | user, password, parameters). | ||
318 | |||
319 | >>> decodeurl("http://www.google.com/index.html") | ||
320 | ('http', 'www.google.com', '/index.html', '', '', {}) | ||
321 | |||
322 | CVS url with username, host and cvsroot. The cvs module to check out is in the | ||
323 | parameters: | ||
324 | |||
325 | >>> decodeurl("cvs://anoncvs@cvs.handhelds.org/cvs;module=familiar/dist/ipkg") | ||
326 | ('cvs', 'cvs.handhelds.org', '/cvs', 'anoncvs', '', {'module': 'familiar/dist/ipkg'}) | ||
327 | |||
328 | Dito, but this time the username has a password part. And we also request a special tag | ||
329 | to check out. | ||
330 | |||
331 | >>> decodeurl("cvs://anoncvs:anonymous@cvs.handhelds.org/cvs;module=familiar/dist/ipkg;tag=V0-99-81") | ||
332 | ('cvs', 'cvs.handhelds.org', '/cvs', 'anoncvs', 'anonymous', {'tag': 'V0-99-81', 'module': 'familiar/dist/ipkg'}) | ||
333 | """ | ||
334 | |||
335 | m = re.compile('(?P<type>[^:]*)://((?P<user>.+)@)?(?P<location>[^;]+)(;(?P<parm>.*))?').match(url) | ||
336 | if not m: | ||
337 | raise MalformedUrl(url) | ||
338 | |||
339 | type = m.group('type') | ||
340 | location = m.group('location') | ||
341 | if not location: | ||
342 | raise MalformedUrl(url) | ||
343 | user = m.group('user') | ||
344 | parm = m.group('parm') | ||
345 | |||
346 | locidx = location.find('/') | ||
347 | if locidx != -1: | ||
348 | host = location[:locidx] | ||
349 | path = location[locidx:] | ||
350 | else: | ||
351 | host = "" | ||
352 | path = location | ||
353 | if user: | ||
354 | m = re.compile('(?P<user>[^:]+)(:?(?P<pswd>.*))').match(user) | ||
355 | if m: | ||
356 | user = m.group('user') | ||
357 | pswd = m.group('pswd') | ||
358 | else: | ||
359 | user = '' | ||
360 | pswd = '' | ||
361 | |||
362 | p = {} | ||
363 | if parm: | ||
364 | for s in parm.split(';'): | ||
365 | s1,s2 = s.split('=') | ||
366 | p[s1] = s2 | ||
367 | |||
368 | return (type, host, path, user, pswd, p) | ||
369 | |||
370 | ####################################################################### | ||
371 | |||
372 | def encodeurl(decoded): | ||
373 | """Encodes a URL from tokens (scheme, network location, path, | ||
374 | user, password, parameters). | ||
375 | |||
376 | >>> encodeurl(['http', 'www.google.com', '/index.html', '', '', {}]) | ||
377 | 'http://www.google.com/index.html' | ||
378 | |||
379 | CVS with username, host and cvsroot. The cvs module to check out is in the | ||
380 | parameters: | ||
381 | |||
382 | >>> encodeurl(['cvs', 'cvs.handhelds.org', '/cvs', 'anoncvs', '', {'module': 'familiar/dist/ipkg'}]) | ||
383 | 'cvs://anoncvs@cvs.handhelds.org/cvs;module=familiar/dist/ipkg' | ||
384 | |||
385 | Dito, but this time the username has a password part. And we also request a special tag | ||
386 | to check out. | ||
387 | |||
388 | >>> encodeurl(['cvs', 'cvs.handhelds.org', '/cvs', 'anoncvs', 'anonymous', {'tag': 'V0-99-81', 'module': 'familiar/dist/ipkg'}]) | ||
389 | 'cvs://anoncvs:anonymous@cvs.handhelds.org/cvs;tag=V0-99-81;module=familiar/dist/ipkg' | ||
390 | """ | ||
391 | |||
392 | (type, host, path, user, pswd, p) = decoded | ||
393 | |||
394 | if not type or not path: | ||
395 | fatal("invalid or missing parameters for url encoding") | ||
396 | url = '%s://' % type | ||
397 | if user: | ||
398 | url += "%s" % user | ||
399 | if pswd: | ||
400 | url += ":%s" % pswd | ||
401 | url += "@" | ||
402 | if host: | ||
403 | url += "%s" % host | ||
404 | url += "%s" % path | ||
405 | if p: | ||
406 | for parm in p.keys(): | ||
407 | url += ";%s=%s" % (parm, p[parm]) | ||
408 | |||
409 | return url | ||
410 | |||
411 | ####################################################################### | ||
412 | |||
413 | def which(path, item, direction = 0): | ||
414 | """ | ||
415 | Locate a file in a PATH | ||
416 | """ | ||
417 | |||
418 | paths = (path or "").split(':') | ||
419 | if direction != 0: | ||
420 | paths.reverse() | ||
421 | |||
422 | for p in (path or "").split(':'): | ||
423 | next = os.path.join(p, item) | ||
424 | if os.path.exists(next): | ||
425 | return next | ||
426 | |||
427 | return "" | ||
428 | |||
429 | ####################################################################### | ||
430 | |||
431 | |||
432 | |||
433 | |||
434 | ####################################################################### | ||
435 | ####################################################################### | ||
436 | # | ||
437 | # SECTION: Dependency | ||
438 | # | ||
439 | # PURPOSE: Compare build & run dependencies | ||
440 | # | ||
441 | ####################################################################### | ||
442 | ####################################################################### | ||
443 | |||
444 | def tokenize(mystring): | ||
445 | """Breaks a string like 'foo? (bar) oni? (blah (blah))' into (possibly embedded) lists: | ||
446 | |||
447 | >>> tokenize("x") | ||
448 | ['x'] | ||
449 | >>> tokenize("x y") | ||
450 | ['x', 'y'] | ||
451 | >>> tokenize("(x y)") | ||
452 | [['x', 'y']] | ||
453 | >>> tokenize("(x y) b c") | ||
454 | [['x', 'y'], 'b', 'c'] | ||
455 | >>> tokenize("foo? (bar) oni? (blah (blah))") | ||
456 | ['foo?', ['bar'], 'oni?', ['blah', ['blah']]] | ||
457 | >>> tokenize("sys-apps/linux-headers nls? (sys-devel/gettext)") | ||
458 | ['sys-apps/linux-headers', 'nls?', ['sys-devel/gettext']] | ||
459 | """ | ||
460 | |||
461 | newtokens = [] | ||
462 | curlist = newtokens | ||
463 | prevlists = [] | ||
464 | level = 0 | ||
465 | accum = "" | ||
466 | for x in mystring: | ||
467 | if x=="(": | ||
468 | if accum: | ||
469 | curlist.append(accum) | ||
470 | accum="" | ||
471 | prevlists.append(curlist) | ||
472 | curlist=[] | ||
473 | level=level+1 | ||
474 | elif x==")": | ||
475 | if accum: | ||
476 | curlist.append(accum) | ||
477 | accum="" | ||
478 | if level==0: | ||
479 | print "!!! tokenizer: Unmatched left parenthesis in:\n'"+mystring+"'" | ||
480 | return None | ||
481 | newlist=curlist | ||
482 | curlist=prevlists.pop() | ||
483 | curlist.append(newlist) | ||
484 | level=level-1 | ||
485 | elif x in whitespace: | ||
486 | if accum: | ||
487 | curlist.append(accum) | ||
488 | accum="" | ||
489 | else: | ||
490 | accum=accum+x | ||
491 | if accum: | ||
492 | curlist.append(accum) | ||
493 | if (level!=0): | ||
494 | print "!!! tokenizer: Exiting with unterminated parenthesis in:\n'"+mystring+"'" | ||
495 | return None | ||
496 | return newtokens | ||
497 | |||
498 | |||
499 | ####################################################################### | ||
500 | |||
501 | def evaluate(tokens,mydefines,allon=0): | ||
502 | """Removes tokens based on whether conditional definitions exist or not. | ||
503 | Recognizes ! | ||
504 | |||
505 | >>> evaluate(['sys-apps/linux-headers', 'nls?', ['sys-devel/gettext']], {}) | ||
506 | ['sys-apps/linux-headers'] | ||
507 | |||
508 | Negate the flag: | ||
509 | |||
510 | >>> evaluate(['sys-apps/linux-headers', '!nls?', ['sys-devel/gettext']], {}) | ||
511 | ['sys-apps/linux-headers', ['sys-devel/gettext']] | ||
512 | |||
513 | Define 'nls': | ||
514 | |||
515 | >>> evaluate(['sys-apps/linux-headers', 'nls?', ['sys-devel/gettext']], {"nls":1}) | ||
516 | ['sys-apps/linux-headers', ['sys-devel/gettext']] | ||
517 | |||
518 | Turn allon on: | ||
519 | |||
520 | >>> evaluate(['sys-apps/linux-headers', 'nls?', ['sys-devel/gettext']], {}, True) | ||
521 | ['sys-apps/linux-headers', ['sys-devel/gettext']] | ||
522 | """ | ||
523 | |||
524 | if tokens == None: | ||
525 | return None | ||
526 | mytokens = tokens + [] # this copies the list | ||
527 | pos = 0 | ||
528 | while pos < len(mytokens): | ||
529 | if type(mytokens[pos]) == types.ListType: | ||
530 | evaluate(mytokens[pos], mydefines) | ||
531 | if not len(mytokens[pos]): | ||
532 | del mytokens[pos] | ||
533 | continue | ||
534 | elif mytokens[pos][-1] == "?": | ||
535 | cur = mytokens[pos][:-1] | ||
536 | del mytokens[pos] | ||
537 | if allon: | ||
538 | if cur[0] == "!": | ||
539 | del mytokens[pos] | ||
540 | else: | ||
541 | if cur[0] == "!": | ||
542 | if (cur[1:] in mydefines) and (pos < len(mytokens)): | ||
543 | del mytokens[pos] | ||
544 | continue | ||
545 | elif (cur not in mydefines) and (pos < len(mytokens)): | ||
546 | del mytokens[pos] | ||
547 | continue | ||
548 | pos = pos + 1 | ||
549 | return mytokens | ||
550 | |||
551 | |||
552 | ####################################################################### | ||
553 | |||
554 | def flatten(mytokens): | ||
555 | """Converts nested arrays into a flat arrays: | ||
556 | |||
557 | >>> flatten([1,[2,3]]) | ||
558 | [1, 2, 3] | ||
559 | >>> flatten(['sys-apps/linux-headers', ['sys-devel/gettext']]) | ||
560 | ['sys-apps/linux-headers', 'sys-devel/gettext'] | ||
561 | """ | ||
562 | |||
563 | newlist=[] | ||
564 | for x in mytokens: | ||
565 | if type(x)==types.ListType: | ||
566 | newlist.extend(flatten(x)) | ||
567 | else: | ||
568 | newlist.append(x) | ||
569 | return newlist | ||
570 | |||
571 | |||
572 | ####################################################################### | ||
573 | |||
_package_weights_ = {"pre":-2,"p":0,"alpha":-4,"beta":-3,"rc":-1} # dicts are unordered
_package_ends_ = ["pre", "p", "alpha", "beta", "rc", "cvs", "bk", "HEAD" ] # so we need ordered list

def _relparse_plain(myver):
    """Parse a version component with no '_' suffix.

    Returns (number, p1): the float value and, when the component ends in a
    single non-digit character (e.g. '1.2b'), the ord() of that letter.
    Raises ValueError for strings float() cannot parse (original behavior).
    """
    divider = len(myver) - 1
    if myver[divider:] not in "1234567890":
        # letter at end, e.g. '1.2b' -> (1.2, ord('b'))
        return float(myver[0:divider]), ord(myver[divider:])
    return float(myver), 0

def relparse(myver):
    """Parses the last elements of a version number into a triplet, that can
    later be compared:

    >>> relparse('1.2_pre3')
    [1.2, -2, 3.0]
    >>> relparse('1.2b')
    [1.2, 98, 0]
    >>> relparse('1.2')
    [1.2, 0, 0]
    """

    number = 0
    p1 = 0
    p2 = 0
    mynewver = myver.split('_')
    if len(mynewver) == 2:
        # version carries one of the _package_ends_ suffixes, e.g. '1.2_pre3'
        number = float(mynewver[0])
        match = 0
        for x in _package_ends_:
            elen = len(x)
            if mynewver[1][:elen] == x:
                match = 1
                # BUGFIX: 'cvs', 'bk' and 'HEAD' appear in _package_ends_ but
                # not in _package_weights_; the original indexed the dict
                # directly and raised KeyError for them.  Unknown suffixes now
                # weigh 0 (same as 'p').
                p1 = _package_weights_.get(x, 0)
                try:
                    p2 = float(mynewver[1][elen:])
                except ValueError:
                    # suffix with no trailing number, e.g. '1.2_beta'
                    p2 = 0
                break
        if not match:
            # unrecognized suffix: fall back to plain parsing of the whole
            # string (original behavior, may raise ValueError)
            number, p1 = _relparse_plain(myver)
    else:
        # normal number, or number with a single letter at the end
        number, p1 = _relparse_plain(myver)
    return [number, p1, p2]
626 | |||
627 | |||
628 | ####################################################################### | ||
629 | |||
__ververify_cache__ = {}

def ververify(myorigval, silent=1):
    """Returns 1 if given a valid version string, else 0. Valid versions are in the format

    <v1>.<v2>...<vx>[a-z,_{_package_weights_}[vy]]

    >>> ververify('2.4.20')
    1
    >>> ververify('2.4..20')      # two dots
    0
    >>> ververify('2.x.20')       # 'x' is not numeric
    0
    >>> ververify('2.4.20a')
    1
    >>> ververify('2.4.20cvs')    # only one trailing letter
    0
    >>> ververify('1a')
    1
    >>> ververify('test_a')       # no version at all
    0
    >>> ververify('2.4.20_beta1')
    1
    >>> ververify('2.4.20_beta')
    1
    >>> ververify('2.4.20_wrongext')  # _wrongext is no valid trailer
    0
    """

    # Lookup the cache first
    try:
        return __ververify_cache__[myorigval]
    except KeyError:
        pass

    if len(myorigval) == 0:
        if not silent:
            error("package version is empty")
        __ververify_cache__[myorigval] = 0
        return 0
    myval = myorigval.split('.')
    if len(myval) == 0:
        if not silent:
            error("package name has empty version string")
        __ververify_cache__[myorigval] = 0
        return 0
    # all but the last component must be purely numeric
    for x in myval[:-1]:
        if not len(x):
            if not silent:
                error("package version has two points in a row")
            __ververify_cache__[myorigval] = 0
            return 0
        try:
            int(x)
        except ValueError:
            if not silent:
                error("package version contains non-numeric '"+x+"'")
            __ververify_cache__[myorigval] = 0
            return 0
    if not len(myval[-1]):
        if not silent:
            error("package version has trailing dot")
        __ververify_cache__[myorigval] = 0
        return 0
    try:
        int(myval[-1])
        __ververify_cache__[myorigval] = 1
        return 1
    except ValueError:
        pass

    # ok, our last component is not a plain number or blank; try the
    # '<number><single letter>' form, e.g. '1a' or '2.0b'
    if myval[-1][-1] in "abcdefghijklmnopqrstuvwxyz":
        try:
            int(myval[-1][:-1])
            # BUGFIX: the original returned *before* writing the cache entry,
            # so this success was recomputed on every call.
            __ververify_cache__[myorigval] = 1
            return 1
        except ValueError:
            pass
    # ok, maybe we have a 1_alpha or 1_beta2; let's see
    ep = myval[-1].split("_")
    if len(ep) != 2:
        if not silent:
            error("package version has more than one letter at then end")
        __ververify_cache__[myorigval] = 0
        return 0
    try:
        int(ep[0])
    except ValueError:
        # this needs to be numeric, i.e. the "1" in "1_alpha"
        if not silent:
            error("package version must have numeric part before the '_'")
        __ververify_cache__[myorigval] = 0
        return 0

    for mye in _package_ends_:
        if ep[1][0:len(mye)] == mye:
            if len(mye) == len(ep[1]):
                # no trailing numeric is ok
                __ververify_cache__[myorigval] = 1
                return 1
            else:
                try:
                    int(ep[1][len(mye):])
                    __ververify_cache__[myorigval] = 1
                    return 1
                except ValueError:
                    # if no _package_weights_ work, *then* we return 0
                    pass
    if not silent:
        error("package version extension after '_' is invalid")
    __ververify_cache__[myorigval] = 0
    return 0
745 | |||
746 | |||
def isjustname(mypkg):
    """Return 1 if no '-'-separated component of mypkg verifies as a
    version string (i.e. mypkg is a bare package name), else 0.

    Uses str.split for consistency with the rest of this module (the
    Python-2-only string.split module function did the same thing).
    """
    for x in mypkg.split('-'):
        if ververify(x):
            return 0
    return 1
753 | |||
754 | |||
# BUGFIX: the cache was declared as '_isspecific_cache_' (single underscores)
# but every read/write below used '__isspecific_cache__', so the first write
# raised NameError.  Declare it under the name the function actually uses,
# consistent with the other caches in this module.
__isspecific_cache__ = {}

def isspecific(mypkg):
    "now supports packages with no category"
    try:
        return __isspecific_cache__[mypkg]
    except KeyError:
        pass

    mysplit = mypkg.split("/")
    if not isjustname(mysplit[-1]):
        __isspecific_cache__[mypkg] = 1
        return 1
    __isspecific_cache__[mypkg] = 0
    return 0
770 | |||
771 | |||
772 | ####################################################################### | ||
773 | |||
__pkgsplit_cache__={}

def pkgsplit(mypkg, silent=1):

    """This function can be used as a package verification function. If
    it is a valid name, pkgsplit will return a list containing:
    [pkgname, pkgversion(norev), pkgrev ], otherwise None.

    >>> pkgsplit('')
    >>> pkgsplit('x')
    >>> pkgsplit('x-')
    >>> pkgsplit('-1')
    >>> pkgsplit('glibc-1.2-8.9-r7')
    >>> pkgsplit('glibc-2.2.5-r7')
    ['glibc', '2.2.5', 'r7']
    >>> pkgsplit('foo-1.2-1')
    >>> pkgsplit('Mesa-3.0')
    ['Mesa', '3.0', 'r0']
    """

    # Results (including failures, stored as None) are memoized per input.
    try:
        return __pkgsplit_cache__[mypkg]
    except KeyError:
        pass

    myparts = string.split(mypkg,'-')
    if len(myparts) < 2:
        if not silent:
            error("package name without name or version part")
        __pkgsplit_cache__[mypkg] = None
        return None
    for x in myparts:
        if len(x) == 0:
            if not silent:
                error("package name with empty name or version part")
            __pkgsplit_cache__[mypkg] = None
            return None
    # verify rev: an explicit revision is the last part, shaped 'r<int>'
    revok = 0
    myrev = myparts[-1]
    # NOTE(review): this ververify() result is discarded; it appears to be
    # called only for its (silent-controlled) error reporting side effect.
    ververify(myrev, silent)
    if len(myrev) and myrev[0] == "r":
        try:
            string.atoi(myrev[1:])
            revok = 1
        except:
            pass
    if revok:
        # '<name>-<version>-r<rev>' form: part before the rev must verify
        if ververify(myparts[-2]):
            if len(myparts) == 2:
                # only '<version>-r<rev>': no name part left
                __pkgsplit_cache__[mypkg] = None
                return None
            else:
                for x in myparts[:-2]:
                    if ververify(x):
                        # names can't have versiony looking parts
                        __pkgsplit_cache__[mypkg]=None
                        return None
                myval=[string.join(myparts[:-2],"-"),myparts[-2],myparts[-1]]
                __pkgsplit_cache__[mypkg]=myval
                return myval
        else:
            __pkgsplit_cache__[mypkg] = None
            return None

    elif ververify(myparts[-1],silent):
        # '<name>-<version>' form with no explicit rev: default rev is 'r0'
        if len(myparts)==1:
            if not silent:
                print "!!! Name error in",mypkg+": missing name part."
            __pkgsplit_cache__[mypkg]=None
            return None
        else:
            for x in myparts[:-1]:
                if ververify(x):
                    if not silent: error("package name has multiple version parts")
                    __pkgsplit_cache__[mypkg] = None
                    return None
            myval = [string.join(myparts[:-1],"-"), myparts[-1],"r0"]
            __pkgsplit_cache__[mypkg] = myval
            return myval
    else:
        # last part is neither a rev nor a version: not a versioned package
        __pkgsplit_cache__[mypkg] = None
        return None
857 | |||
858 | |||
859 | ####################################################################### | ||
860 | |||
__catpkgsplit_cache__ = {}

def catpkgsplit(mydata, silent=1):
    """returns [cat, pkgname, version, rev ]

    >>> catpkgsplit('sys-libs/glibc-1.2-r7')
    ['sys-libs', 'glibc', '1.2', 'r7']
    >>> catpkgsplit('glibc-1.2-r7')
    [None, 'glibc', '1.2', 'r7']
    """

    try:
        return __catpkgsplit_cache__[mydata]
    except KeyError:
        pass

    # BUGFIX: cache results under the caller's original argument.  The code
    # below rewrites 'mydata' (collapses the path to 'cat/basename', strips
    # '.bb'), so the original stored entries under the rewritten string while
    # the lookup above used the raw argument -- the cache could never hit.
    cachekey = mydata

    # reduce an arbitrary path to '<category>/<pkg-file>' and drop a
    # trailing '.bb' recipe extension
    cat = os.path.basename(os.path.dirname(mydata))
    mydata = os.path.join(cat, os.path.basename(mydata))
    if mydata[-3:] == '.bb':
        mydata = mydata[:-3]

    mysplit = mydata.split("/")
    p_split = None
    splitlen = len(mysplit)
    if splitlen == 1:
        # no category component
        retval = [None]
        p_split = pkgsplit(mydata, silent)
    else:
        retval = [mysplit[splitlen - 2]]
        p_split = pkgsplit(mysplit[splitlen - 1], silent)
    if not p_split:
        __catpkgsplit_cache__[cachekey] = None
        return None
    retval.extend(p_split)
    __catpkgsplit_cache__[cachekey] = retval
    return retval
897 | |||
898 | |||
899 | ####################################################################### | ||
900 | |||
__vercmp_cache__ = {}

def vercmp(val1, val2):
    """This takes two version strings and returns an integer to tell you whether
    the versions are the same, val1>val2 or val2>val1.

    >>> vercmp('1', '2')
    -1.0
    >>> vercmp('2', '1')
    1.0
    >>> vercmp('1', '1.0')
    0
    >>> vercmp('1', '1.1')
    -1.0
    >>> vercmp('1.1', '1_p2')
    1.0
    """

    # quick short-circuit for identical strings
    if val1 == val2:
        return 0
    valkey = val1 + " " + val2

    # Cache lookup: try the forward key, then the negated reverse key.
    # BUGFIX: the original nested the reverse lookup *after* an unconditional
    # 'return' inside the same 'try' block, making it unreachable; on a
    # forward-key miss the outer 'except KeyError' skipped it entirely.
    try:
        return __vercmp_cache__[valkey]
    except KeyError:
        pass
    try:
        return - __vercmp_cache__[val2 + " " + val1]
    except KeyError:
        pass

    # consider 1_p2 vs 1.1
    # after expansion will become (1_p2,0) vs (1,1)
    # then 1_p2 is compared with 1 before 0 is compared with 1
    # to solve the bug we need to convert it to (1,0_p2)
    # by splitting _prepart part and adding it back _after_ expansion

    val1_prepart = val2_prepart = ''
    if val1.count('_'):
        val1, val1_prepart = val1.split('_', 1)
    if val2.count('_'):
        val2, val2_prepart = val2.split('_', 1)

    # replace '-' by '.'
    # FIXME: Is it needed? can val1/2 contain '-'?
    val1 = val1.split('-')
    if len(val1) == 2:
        val1[0] = val1[0] + "." + val1[1]
    val2 = val2.split('-')
    if len(val2) == 2:
        val2[0] = val2[0] + "." + val2[1]

    val1 = val1[0].split('.')
    val2 = val2[0].split('.')

    # add back decimal point so that .03 does not become "3" !
    for x in range(1, len(val1)):
        if val1[x][0] == '0':
            val1[x] = '.' + val1[x]
    for x in range(1, len(val2)):
        if val2[x][0] == '0':
            val2[x] = '.' + val2[x]

    # extend the shorter version with "0" components so both have the
    # same number of digits
    if len(val2) < len(val1):
        val2.extend(["0"] * (len(val1) - len(val2)))
    elif len(val1) < len(val2):
        val1.extend(["0"] * (len(val2) - len(val1)))

    # add back _prepart tails
    if val1_prepart:
        val1[-1] += '_' + val1_prepart
    if val2_prepart:
        val2[-1] += '_' + val2_prepart

    # compare component-wise via relparse() triplets; first non-zero
    # difference decides (and is cached)
    for x in range(0, len(val1)):
        cmp1 = relparse(val1[x])
        cmp2 = relparse(val2[x])
        for y in range(0, 3):
            myret = cmp1[y] - cmp2[y]
            if myret != 0:
                __vercmp_cache__[valkey] = myret
                return myret
    __vercmp_cache__[valkey] = 0
    return 0
990 | |||
991 | |||
992 | ####################################################################### | ||
993 | |||
def pkgcmp(pkg1, pkg2):
    """ Compares two packages, which should have been split via
    pkgsplit(). if the return value val is less than zero, then pkg2 is
    newer than pkg1, zero if equal and positive if older.

    >>> pkgcmp(['glibc', '2.2.5', 'r7'], ['glibc', '2.2.5', 'r7'])
    0
    >>> pkgcmp(['glibc', '2.2.5', 'r4'], ['glibc', '2.2.5', 'r7'])
    -1
    >>> pkgcmp(['glibc', '2.2.5', 'r7'], ['glibc', '2.2.5', 'r2'])
    1
    """

    # version part decides first
    mycmp = vercmp(pkg1[1], pkg2[1])
    if mycmp > 0:
        return 1
    if mycmp < 0:
        return -1
    # versions equal: compare the numeric revision after the leading 'r'
    # (int() replaces the Python-2-only string.atoi used previously)
    r1 = int(pkg1[2][1:])
    r2 = int(pkg2[2][1:])
    if r1 > r2:
        return 1
    if r2 > r1:
        return -1
    return 0
1019 | |||
1020 | |||
1021 | ####################################################################### | ||
1022 | |||
def dep_parenreduce(mysplit, mypos=0):
    """Accepts a list of strings, and converts '(' and ')' surrounded items to sub-lists:

    >>> dep_parenreduce([''])
    ['']
    >>> dep_parenreduce(['1', '2', '3'])
    ['1', '2', '3']
    >>> dep_parenreduce(['1', '(', '2', '3', ')', '4'])
    ['1', ['2', '3'], '4']

    The list is modified in place and also returned.
    """

    while mypos < len(mysplit):
        # skip anything that does not open a group
        if mysplit[mypos] != "(":
            mypos += 1
            continue

        opener = mypos
        mypos += 1
        # scan forward for the matching ')', recursing into nested '('
        while mypos < len(mysplit):
            token = mysplit[mypos]
            if token == ")":
                # collapse '(' ... ')' into a single sub-list element
                mysplit[opener:mypos + 1] = [mysplit[opener + 1:mypos]]
                mypos = opener
                break
            if token == "(":
                mysplit = dep_parenreduce(mysplit, mypos)
            mypos += 1
        mypos += 1
    return mysplit
1049 | |||
1050 | |||
def dep_opconvert(mysplit, myuse):
    """Does dependency operator conversion.

    Walks a paren-reduced token list, expanding '||' alternatives and
    'flag?' / '!flag?' USE-conditional clauses against the enabled-flag
    list 'myuse'.  Returns a new nested list, or None on malformed input
    (stray ')' or '||' not followed by a group).
    """

    mypos = 0
    newsplit = []
    while mypos < len(mysplit):
        if type(mysplit[mypos]) == types.ListType:
            # sub-group: convert recursively
            newsplit.append(dep_opconvert(mysplit[mypos],myuse))
            mypos += 1
        elif mysplit[mypos] == ")":
            # mismatched paren, error
            return None
        elif mysplit[mypos]=="||":
            if ((mypos+1)>=len(mysplit)) or (type(mysplit[mypos+1])!=types.ListType):
                # || must be followed by paren'd list
                return None
            try:
                mynew = dep_opconvert(mysplit[mypos+1],myuse)
            except Exception, e:
                error("unable to satisfy OR dependancy: " + string.join(mysplit," || "))
                raise e
            # tag the converted group so later stages know it is an OR list
            mynew[0:0] = ["||"]
            newsplit.append(mynew)
            mypos += 2
        elif mysplit[mypos][-1] == "?":
            # use clause, i.e "gnome? ( foo bar )"
            # this is a quick and dirty hack so that repoman can enable all USE vars:
            if (len(myuse) == 1) and (myuse[0] == "*"):
                # enable it even if it's ! (for repoman) but kill it if it's
                # an arch variable that isn't for this arch. XXX Sparc64?
                # NOTE(review): relies on module-global 'settings' -- confirm
                # it is initialized before this path can run.
                if (mysplit[mypos][:-1] not in settings.usemask) or \
                        (mysplit[mypos][:-1]==settings["ARCH"]):
                    enabled=1
                else:
                    enabled=0
            else:
                if mysplit[mypos][0] == "!":
                    # '!flag?': enabled when the flag is NOT set
                    myusevar = mysplit[mypos][1:-1]
                    enabled = not myusevar in myuse
                else:
                    # 'flag?': enabled when the flag is set
                    myusevar=mysplit[mypos][:-1]
                    enabled = myusevar in myuse
            if (mypos +2 < len(mysplit)) and (mysplit[mypos+2] == ":"):
                # colon mode: 'flag? A : B' -- pick A when enabled, B otherwise
                if enabled:
                    # choose the first option
                    if type(mysplit[mypos+1]) == types.ListType:
                        newsplit.append(dep_opconvert(mysplit[mypos+1],myuse))
                    else:
                        newsplit.append(mysplit[mypos+1])
                else:
                    # choose the alternate option
                    # NOTE(review): this branch type-checks mysplit[mypos+1]
                    # but appends/converts mysplit[mypos+3]; looks like the
                    # check should be on mypos+3 -- confirm before relying on
                    # colon-mode with a list alternate.
                    if type(mysplit[mypos+1]) == types.ListType:
                        newsplit.append(dep_opconvert(mysplit[mypos+3],myuse))
                    else:
                        newsplit.append(mysplit[mypos+3])
                mypos += 4
            else:
                # normal use mode: include the following item only if enabled
                if enabled:
                    if type(mysplit[mypos+1]) == types.ListType:
                        newsplit.append(dep_opconvert(mysplit[mypos+1],myuse))
                    else:
                        newsplit.append(mysplit[mypos+1])
                # otherwise, continue
                mypos += 2
        else:
            # normal item
            newsplit.append(mysplit[mypos])
            mypos += 1
    return newsplit
1130 | |||
if __name__ == "__main__":
    # Self-test entry point: silence debug output and run the doctests
    # embedded in the docstrings of the bb package.
    import doctest, bb
    bb.msg.set_debug_level(0)
    doctest.testmod(bb)
diff --git a/bitbake-dev/lib/bb/build.py b/bitbake-dev/lib/bb/build.py deleted file mode 100644 index 6d80b4b549..0000000000 --- a/bitbake-dev/lib/bb/build.py +++ /dev/null | |||
@@ -1,394 +0,0 @@ | |||
1 | # ex:ts=4:sw=4:sts=4:et | ||
2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
3 | # | ||
4 | # BitBake 'Build' implementation | ||
5 | # | ||
6 | # Core code for function execution and task handling in the | ||
7 | # BitBake build tools. | ||
8 | # | ||
9 | # Copyright (C) 2003, 2004 Chris Larson | ||
10 | # | ||
11 | # Based on Gentoo's portage.py. | ||
12 | # | ||
13 | # This program is free software; you can redistribute it and/or modify | ||
14 | # it under the terms of the GNU General Public License version 2 as | ||
15 | # published by the Free Software Foundation. | ||
16 | # | ||
17 | # This program is distributed in the hope that it will be useful, | ||
18 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
19 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
20 | # GNU General Public License for more details. | ||
21 | # | ||
22 | # You should have received a copy of the GNU General Public License along | ||
23 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
24 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
25 | # | ||
26 | #Based on functions from the base bb module, Copyright 2003 Holger Schurig | ||
27 | |||
28 | from bb import data, event, mkdirhier, utils | ||
29 | import bb, os, sys | ||
30 | |||
31 | # When we execute a python function we'd like certain things | ||
32 | # in all namespaces, hence we add them to __builtins__ | ||
33 | # If we do not do this and use the exec globals, they will | ||
34 | # not be available to subfunctions. | ||
35 | __builtins__['bb'] = bb | ||
36 | __builtins__['os'] = os | ||
37 | |||
38 | # events | ||
class FuncFailed(Exception):
    """
    Executed function failed
    First parameter a message
    Second parameter is a logfile (optional)
    """
45 | |||
class EventException(Exception):
    """Exception which is associated with an Event.

    Carries a human-readable message and the associated event object as
    the standard Exception ``args`` tuple.
    """

    def __init__(self, msg, event):
        # store both values in args so they survive str()/repr()/unpacking
        self.args = (msg, event)
51 | |||
class TaskBase(event.Event):
    """Base class for task events.

    Stores the task name, the package (PF) it belongs to and a
    preformatted human-readable message.
    """

    def __init__(self, t, d ):
        self._task = t
        # Fetch PF once and reuse it; the original called
        # bb.data.getVar("PF", d, 1) a second time just for the message.
        self._package = bb.data.getVar("PF", d, 1)
        event.Event.__init__(self)
        # getName() returns e.g. 'TaskStarted'; [4:] strips the 'Task' prefix
        self._message = "package %s: task %s: %s" % (self._package, t, bb.event.getName(self)[4:])

    def getTask(self):
        return self._task

    def setTask(self, task):
        self._task = task

    task = property(getTask, setTask, None, "task property")
68 | |||
class TaskStarted(TaskBase):
    """Task execution started"""

class TaskSucceeded(TaskBase):
    """Task execution completed"""

class TaskFailed(TaskBase):
    """Task execution failed"""
    def __init__(self, msg, logfile, t, d ):
        # msg: human-readable failure description
        # logfile: path to the captured task log (may be None)
        self.logfile = logfile
        self.msg = msg
        TaskBase.__init__(self, t, d)

class InvalidTask(TaskBase):
    """Invalid Task"""
84 | |||
85 | # functions | ||
86 | |||
def exec_func(func, d, dirs = None):
    """Execute an BB 'function'.

    Looks up the function body and its varflags in the datastore 'd',
    prepares directories/log files, redirects stdin/stdout/stderr to the
    task log, takes any requested lockfiles, then dispatches to
    exec_func_python() or exec_func_shell().  File descriptors, locks and
    the working directory are restored afterwards.
    """

    body = data.getVar(func, d)
    if not body:
        # nothing to execute
        return

    flags = data.getVarFlags(func, d)
    # normalize: make sure every flag we consult below exists
    for item in ['deps', 'check', 'interactive', 'python', 'cleandirs', 'dirs', 'lockfiles', 'fakeroot']:
        if not item in flags:
            flags[item] = None

    ispython = flags['python']

    # wipe any directories listed in 'cleandirs' before running
    cleandirs = (data.expand(flags['cleandirs'], d) or "").split()
    for cdir in cleandirs:
        os.system("rm -rf %s" % cdir)

    # create the directories from 'dirs'; the last one becomes the cwd
    if dirs:
        dirs = data.expand(dirs, d)
    else:
        dirs = (data.expand(flags['dirs'], d) or "").split()
    for adir in dirs:
        mkdirhier(adir)

    if len(dirs) > 0:
        adir = dirs[-1]
    else:
        adir = data.getVar('B', d, 1)

    # Save current directory
    try:
        prevdir = os.getcwd()
    except OSError:
        # cwd may have been deleted under us; fall back to TOPDIR
        prevdir = data.getVar('TOPDIR', d, True)

    # Setup logfiles
    t = data.getVar('T', d, 1)
    if not t:
        bb.msg.fatal(bb.msg.domain.Build, "T not set")
    mkdirhier(t)
    # Gross hack, FIXME: random() is used to make the log name unique
    import random
    logfile = "%s/log.%s.%s.%s" % (t, func, str(os.getpid()),random.random())
    runfile = "%s/run.%s.%s" % (t, func, str(os.getpid()))

    # Change to correct directory (if specified)
    if adir and os.access(adir, os.F_OK):
        os.chdir(adir)

    # Handle logfiles: in debug (or python) mode, tee output to the
    # console as well as the logfile
    si = file('/dev/null', 'r')
    try:
        if bb.msg.debug_level['default'] > 0 or ispython:
            so = os.popen("tee \"%s\"" % logfile, "w")
        else:
            so = file(logfile, 'w')
    except OSError, e:
        bb.msg.error(bb.msg.domain.Build, "opening log file: %s" % e)
        pass

    se = so

    # Dup the existing fds so we dont lose them
    osi = [os.dup(sys.stdin.fileno()), sys.stdin.fileno()]
    oso = [os.dup(sys.stdout.fileno()), sys.stdout.fileno()]
    ose = [os.dup(sys.stderr.fileno()), sys.stderr.fileno()]

    # Replace those fds with our own
    os.dup2(si.fileno(), osi[1])
    os.dup2(so.fileno(), oso[1])
    os.dup2(se.fileno(), ose[1])

    # take any lockfiles the function declared before running it
    locks = []
    lockfiles = (data.expand(flags['lockfiles'], d) or "").split()
    for lock in lockfiles:
        locks.append(bb.utils.lockfile(lock))

    try:
        # Run the function
        if ispython:
            exec_func_python(func, d, runfile, logfile)
        else:
            exec_func_shell(func, d, runfile, logfile, flags)

        # Restore original directory
        try:
            os.chdir(prevdir)
        except:
            pass

    finally:
        # cleanup runs even when the function raised

        # Unlock any lockfiles
        for lock in locks:
            bb.utils.unlockfile(lock)

        # Restore the backup fds
        os.dup2(osi[0], osi[1])
        os.dup2(oso[0], oso[1])
        os.dup2(ose[0], ose[1])

        # Close our logs
        si.close()
        so.close()
        se.close()

        if os.path.exists(logfile) and os.path.getsize(logfile) == 0:
            bb.msg.debug(2, bb.msg.domain.Build, "Zero size logfile %s, removing" % logfile)
            os.remove(logfile)

        # Close the backup fds
        os.close(osi[0])
        os.close(oso[0])
        os.close(ose[0])
202 | |||
def exec_func_python(func, d, runfile, logfile):
    """Execute a python BB 'function'.

    The function body from the metadata is wrapped into a generated
    'def <func>(): ...' plus a trailing call, written to 'runfile' for
    debugging, compiled, and executed with the datastore available as
    the global 'd'.  Raises FuncFailed (carrying 'logfile') on any error
    other than SkipPackage/FuncFailed themselves.
    """
    # (the original did 'import re, os' here; neither module is used)
    bbfile = bb.data.getVar('FILE', d, 1)
    tmp = "def " + func + "():\n%s" % data.getVar(func, d)
    tmp += '\n' + func + '()'

    # BUGFIX: the runfile handle was never closed, leaking one file
    # descriptor per executed python task.
    f = open(runfile, "w")
    try:
        f.write(tmp)
    finally:
        f.close()

    comp = utils.better_compile(tmp, func, bbfile)
    g = {} # globals
    g['d'] = d
    try:
        utils.better_exec(comp, g, tmp, bbfile)
    except:
        (t,value,tb) = sys.exc_info()

        # let deliberate control-flow exceptions propagate unchanged
        if t in [bb.parse.SkipPackage, bb.build.FuncFailed]:
            raise
        bb.msg.error(bb.msg.domain.Build, "Function %s failed" % func)
        raise FuncFailed("function %s failed" % func, logfile)
225 | |||
def exec_func_shell(func, d, runfile, logfile, flags):
    """Execute a shell BB 'function'. Returns true if execution was successful.

    For this, it creates a shell script in the tmp directory, writes the local
    data into it and finally executes. The output of the shell will end in a log file and stdout.

    Note on directory behavior. The 'dirs' varflag should contain a list
    of the directories you need created prior to execution. The last
    item in the list is where we will chdir/cd to.

    Raises FuncFailed (with 'logfile') when the script exits non-zero or
    no function name was given.
    """

    # optional pre-check hook: a global named by the 'check' flag can
    # short-circuit execution
    deps = flags['deps']
    check = flags['check']
    if check in globals():
        if globals()[check](func, deps):
            return

    # write the run script: environment dump, cd to cwd, then the function
    f = open(runfile, "w")
    f.write("#!/bin/sh -e\n")
    if bb.msg.debug_level['default'] > 0: f.write("set -x\n")
    data.emit_env(f, d)

    f.write("cd %s\n" % os.getcwd())
    if func: f.write("%s\n" % func)
    f.close()
    os.chmod(runfile, 0775)
    if not func:
        bb.msg.error(bb.msg.domain.Build, "Function not specified")
        raise FuncFailed("Function not specified for exec_func_shell")

    # execute function, optionally under fakeroot
    if flags['fakeroot']:
        maybe_fakeroot = "PATH=\"%s\" fakeroot " % bb.data.getVar("PATH", d, 1)
    else:
        maybe_fakeroot = ''
    # force a predictable locale for tool output
    lang_environment = "LC_ALL=C "
    ret = os.system('%s%ssh -e %s' % (lang_environment, maybe_fakeroot, runfile))

    if ret == 0:
        return

    bb.msg.error(bb.msg.domain.Build, "Function %s failed" % func)
    raise FuncFailed("function %s failed" % func, logfile)
269 | |||
270 | |||
def exec_task(task, d):
    """Execute an BB 'task'

    The primary difference between executing a task versus executing
    a function is that a task exists in the task digraph, and therefore
    has dependencies amongst other tasks."""

    # Check whether this is a valid task
    if not data.getVarFlag(task, 'task', d):
        raise EventException("No such task", InvalidTask(task, d))

    try:
        bb.msg.debug(1, bb.msg.domain.Build, "Executing task %s" % task)
        # run the task against a copy of the datastore with a
        # 'task-<name>' override prepended, so task-specific overrides apply
        old_overrides = data.getVar('OVERRIDES', d, 0)
        localdata = data.createCopy(d)
        data.setVar('OVERRIDES', 'task-%s:%s' % (task[3:], old_overrides), localdata)
        data.update_data(localdata)
        data.expandKeys(localdata)
        event.fire(TaskStarted(task, localdata), localdata)
        exec_func(task, localdata)
        event.fire(TaskSucceeded(task, localdata), localdata)
    except FuncFailed, message:
        # Try to extract the optional logfile
        try:
            (msg, logfile) = message
        except:
            logfile = None
            msg = message
        bb.msg.note(1, bb.msg.domain.Build, "Task failed: %s" % message )
        failedevent = TaskFailed(msg, logfile, task, d)
        event.fire(failedevent, d)
        raise EventException("Function failed in task: %s" % message, failedevent)

    # make stamp, or cause event and raise exception
    if not data.getVarFlag(task, 'nostamp', d) and not data.getVarFlag(task, 'selfstamp', d):
        make_stamp(task, d)
307 | |||
def extract_stamp(d, fn):
    """
    Return the stamp base path for a recipe.

    When 'fn' is set, 'd' is a dataCache and the stamp comes from its
    per-file stamp table; otherwise 'd' is a data dictionary and the
    expanded STAMP variable is returned.
    """
    if not fn:
        return data.getVar('STAMP', d, 1)
    return d.stamp[fn]
316 | |||
def stamp_internal(task, d, file_name):
    """
    Internal stamp helper function
    Removes any stamp for the given task
    Makes sure the stamp directory exists
    Returns the stamp path+filename, or None when no stamp base is set
    """
    stamp = extract_stamp(d, file_name)
    if not stamp:
        # no STAMP configured: nothing to do
        return
    stamp = "%s.%s" % (stamp, task)
    mkdirhier(os.path.dirname(stamp))
    # Remove the file and recreate to force timestamp
    # change on broken NFS filesystems
    if os.access(stamp, os.F_OK):
        os.remove(stamp)
    return stamp
334 | |||
def make_stamp(task, d, file_name = None):
    """
    Creates/updates a stamp for a given task
    (d can be a data dict or dataCache)
    """
    stamp = stamp_internal(task, d, file_name)
    if stamp:
        # touch: open-for-write and close immediately creates an empty
        # file with a fresh timestamp
        f = open(stamp, "w")
        f.close()
344 | |||
def del_stamp(task, d, file_name = None):
    """
    Removes a stamp for a given task
    (d can be a data dict or dataCache)
    """
    # stamp_internal() already deletes any existing stamp as a side
    # effect; its return value is deliberately ignored here.
    stamp_internal(task, d, file_name)
351 | |||
def add_tasks(tasklist, d):
    """Register the tasks in 'tasklist' in the datastore's _task_deps
    structure, recording per-task dependency flags and parent tasks."""
    task_deps = data.getVar('_task_deps', d)
    if not task_deps:
        task_deps = {}
    if not 'tasks' in task_deps:
        task_deps['tasks'] = []
    if not 'parents' in task_deps:
        task_deps['parents'] = {}

    for task in tasklist:
        task = data.expand(task, d)
        # mark the variable as a task so exec_task() accepts it
        data.setVarFlag(task, 'task', 1, d)

        if not task in task_deps['tasks']:
            task_deps['tasks'].append(task)

        flags = data.getVarFlags(task, d)
        # copy selected varflags into the task_deps tables
        def getTask(name):
            if not name in task_deps:
                task_deps[name] = {}
            if name in flags:
                deptask = data.expand(flags[name], d)
                task_deps[name][task] = deptask
        getTask('depends')
        getTask('deptask')
        getTask('rdeptask')
        getTask('recrdeptask')
        getTask('nostamp')
        # NOTE(review): assumes every task carries a 'deps' flag -- a task
        # without one would raise KeyError here; confirm callers guarantee it.
        task_deps['parents'][task] = []
        for dep in flags['deps']:
            dep = data.expand(dep, d)
            task_deps['parents'][task].append(dep)

    # don't assume holding a reference
    data.setVar('_task_deps', task_deps, d)
387 | |||
def remove_task(task, kill, d):
    """Remove an BB 'task'.

    If kill is 1, also remove tasks that depend on this task."""

    # NOTE(review): 'kill' is currently ignored -- only the 'task' flag on
    # this task is deleted; dependent tasks are not removed.
    data.delVarFlag(task, 'task', d)
394 | |||
diff --git a/bitbake-dev/lib/bb/cache.py b/bitbake-dev/lib/bb/cache.py deleted file mode 100644 index 2f1b8fa601..0000000000 --- a/bitbake-dev/lib/bb/cache.py +++ /dev/null | |||
@@ -1,533 +0,0 @@ | |||
1 | # ex:ts=4:sw=4:sts=4:et | ||
2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
3 | # | ||
4 | # BitBake 'Event' implementation | ||
5 | # | ||
6 | # Caching of bitbake variables before task execution | ||
7 | |||
8 | # Copyright (C) 2006 Richard Purdie | ||
9 | |||
10 | # but small sections based on code from bin/bitbake: | ||
11 | # Copyright (C) 2003, 2004 Chris Larson | ||
12 | # Copyright (C) 2003, 2004 Phil Blundell | ||
13 | # Copyright (C) 2003 - 2005 Michael 'Mickey' Lauer | ||
14 | # Copyright (C) 2005 Holger Hans Peter Freyther | ||
15 | # Copyright (C) 2005 ROAD GmbH | ||
16 | # | ||
17 | # This program is free software; you can redistribute it and/or modify | ||
18 | # it under the terms of the GNU General Public License version 2 as | ||
19 | # published by the Free Software Foundation. | ||
20 | # | ||
21 | # This program is distributed in the hope that it will be useful, | ||
22 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
23 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
24 | # GNU General Public License for more details. | ||
25 | # | ||
26 | # You should have received a copy of the GNU General Public License along | ||
27 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
28 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
29 | |||
30 | |||
31 | import os, re | ||
32 | import bb.data | ||
33 | import bb.utils | ||
34 | |||
35 | try: | ||
36 | import cPickle as pickle | ||
37 | except ImportError: | ||
38 | import pickle | ||
39 | bb.msg.note(1, bb.msg.domain.Cache, "Importing cPickle failed. Falling back to a very slow implementation.") | ||
40 | |||
# Bump whenever the on-disk cache layout changes; a mismatch forces a rebuild.
__cache_version__ = "130"
42 | |||
class Cache:
    """
    BitBake Cache implementation

    Memoizes the variables accessed while parsing each recipe so later runs
    can skip re-parsing files whose own mtime and whose dependencies' mtimes
    are unchanged.  Persisted as a pickled (depends_cache, version_data)
    pair in CACHE/bb_cache.dat.
    """
    def __init__(self, cooker):

        self.cachedir = bb.data.getVar("CACHE", cooker.configuration.data, True)
        self.clean = {}          # fn -> "": entries verified valid this run
        self.checked = {}        # fn -> "": entries already validated (valid or not)
        self.depends_cache = {}  # fn -> {varname: value}
        self.data = None         # live datastore values are learned from
        self.data_fn = None      # (virtual) filename self.data belongs to
        self.cacheclean = True   # True until a value is cached from live data

        if self.cachedir in [None, '']:
            self.has_cache = False
            bb.msg.note(1, bb.msg.domain.Cache, "Not using a cache. Set CACHE = <directory> to enable.")
            return

        self.has_cache = True
        self.cachefile = os.path.join(self.cachedir, "bb_cache.dat")

        bb.msg.debug(1, bb.msg.domain.Cache, "Using cache in '%s'" % self.cachedir)
        try:
            os.stat(self.cachedir)
        except OSError:
            bb.mkdirhier(self.cachedir)

        # If any of configuration.data's dependencies are newer than the
        # cache there isn't even any point in loading it...
        newest_mtime = 0
        deps = bb.data.getVar("__depends", cooker.configuration.data, True)
        for f, old_mtime in deps:
            if old_mtime > newest_mtime:
                newest_mtime = old_mtime

        if bb.parse.cached_mtime_noerror(self.cachefile) >= newest_mtime:
            try:
                # Open/close the cache file explicitly rather than handing an
                # anonymous file object to Unpickler (previously leaked).
                cachefile = open(self.cachefile, "rb")
                try:
                    p = pickle.Unpickler(cachefile)
                    self.depends_cache, version_data = p.load()
                finally:
                    cachefile.close()
                # Version mismatches are raised so the broad handler below
                # discards the stale cache.
                if version_data['CACHE_VER'] != __cache_version__:
                    raise ValueError('Cache Version Mismatch')
                if version_data['BITBAKE_VER'] != bb.__version__:
                    raise ValueError('Bitbake Version Mismatch')
            except EOFError:
                bb.msg.note(1, bb.msg.domain.Cache, "Truncated cache found, rebuilding...")
                self.depends_cache = {}
            except:
                bb.msg.note(1, bb.msg.domain.Cache, "Invalid cache found, rebuilding...")
                self.depends_cache = {}
        else:
            try:
                os.stat(self.cachefile)
                bb.msg.note(1, bb.msg.domain.Cache, "Out of date cache found, rebuilding...")
            except OSError:
                pass

    def getVar(self, var, fn, exp = 0):
        """
        Gets the value of a variable
        (similar to getVar in the data class)

        There are two scenarios:
          1. We have cached data - serve from depends_cache[fn]
          2. We're learning what data to cache - serve from data
             backend but add a copy of the data to the cache.
        """
        if fn in self.clean:
            return self.depends_cache[fn][var]

        if not fn in self.depends_cache:
            self.depends_cache[fn] = {}

        if fn != self.data_fn:
            # We're trying to access data in the cache which doesn't exist
            # yet setData hasn't been called to setup the right access. Very bad.
            bb.msg.error(bb.msg.domain.Cache, "Parsing error data_fn %s and fn %s don't match" % (self.data_fn, fn))

        # A value was fetched from live data, so the cache needs saving.
        self.cacheclean = False
        result = bb.data.getVar(var, self.data, exp)
        self.depends_cache[fn][var] = result
        return result

    def setData(self, virtualfn, fn, data):
        """
        Called to prime bb_cache ready to learn which variables to cache.
        Will be followed by calls to self.getVar which aren't cached
        but can be fulfilled from self.data.
        """
        self.data_fn = virtualfn
        self.data = data

        # Make sure __depends makes the depends_cache
        # If we're a virtual class we need to make sure all our depends are appended
        # to the depends of fn.
        depends = self.getVar("__depends", virtualfn, True) or []
        if "__depends" not in self.depends_cache[fn] or not self.depends_cache[fn]["__depends"]:
            self.depends_cache[fn]["__depends"] = depends
        for dep in depends:
            if dep not in self.depends_cache[fn]["__depends"]:
                self.depends_cache[fn]["__depends"].append(dep)

        # Make sure BBCLASSEXTEND always makes the cache too
        self.getVar('BBCLASSEXTEND', virtualfn, True)

        self.depends_cache[virtualfn]["CACHETIMESTAMP"] = bb.parse.cached_mtime(fn)

    def virtualfn2realfn(self, virtualfn):
        """
        Convert a virtual file name to a real one + the associated subclass keyword
        """
        fn = virtualfn
        cls = ""
        if virtualfn.startswith('virtual:'):
            cls = virtualfn.split(':', 2)[1]
            fn = virtualfn.replace('virtual:' + cls + ':', '')
        return (fn, cls)

    def realfn2virtual(self, realfn, cls):
        """
        Convert a real filename + the associated subclass keyword to a virtual filename
        """
        if cls == "":
            return realfn
        return "virtual:" + cls + ":" + realfn

    def loadDataFull(self, virtualfn, cfgData):
        """
        Return a complete set of data for fn.
        To do this, we need to parse the file.
        """
        (fn, cls) = self.virtualfn2realfn(virtualfn)

        bb.msg.debug(1, bb.msg.domain.Cache, "Parsing %s (full)" % fn)

        bb_data = self.load_bbfile(fn, cfgData)
        return bb_data[cls]

    def loadData(self, fn, cfgData, cacheData):
        """
        Load a subset of data for fn.
        If the cached data is valid we do nothing,
        To do this, we need to parse the file and set the system
        to record the variables accessed.
        Return the cache status and whether the file was skipped when parsed
        """
        skipped = 0
        virtuals = 0

        if fn not in self.checked:
            self.cacheValidUpdate(fn)

        if self.cacheValid(fn):
            # Serve fn and all its BBCLASSEXTEND variants from the cache.
            multi = self.getVar('BBCLASSEXTEND', fn, True)
            for cls in (multi or "").split() + [""]:
                virtualfn = self.realfn2virtual(fn, cls)
                if self.depends_cache[virtualfn]["__SKIPPED"]:
                    skipped += 1
                    bb.msg.debug(1, bb.msg.domain.Cache, "Skipping %s" % virtualfn)
                    continue
                self.handle_data(virtualfn, cacheData)
                virtuals += 1
            return True, skipped, virtuals

        bb.msg.debug(1, bb.msg.domain.Cache, "Parsing %s" % fn)

        bb_data = self.load_bbfile(fn, cfgData)

        for data in bb_data:
            virtualfn = self.realfn2virtual(fn, data)
            self.setData(virtualfn, fn, bb_data[data])
            if self.getVar("__SKIPPED", virtualfn, True):
                skipped += 1
                bb.msg.debug(1, bb.msg.domain.Cache, "Skipping %s" % virtualfn)
            else:
                self.handle_data(virtualfn, cacheData)
                virtuals += 1
        return False, skipped, virtuals


    def cacheValid(self, fn):
        """
        Is the cache valid for fn?
        Fast version, no timestamps checked.
        """
        # Is cache enabled?
        if not self.has_cache:
            return False
        if fn in self.clean:
            return True
        return False

    def cacheValidUpdate(self, fn):
        """
        Is the cache valid for fn?
        Make thorough (slower) checks including timestamps.
        """
        # Is cache enabled?
        if not self.has_cache:
            return False

        self.checked[fn] = ""

        # Pretend we're clean so getVar works
        self.clean[fn] = ""

        # File isn't in depends_cache
        if not fn in self.depends_cache:
            bb.msg.debug(2, bb.msg.domain.Cache, "Cache: %s is not cached" % fn)
            self.remove(fn)
            return False

        mtime = bb.parse.cached_mtime_noerror(fn)

        # Check file still exists
        if mtime == 0:
            # Typo fix: was "not longer exists".
            bb.msg.debug(2, bb.msg.domain.Cache, "Cache: %s no longer exists" % fn)
            self.remove(fn)
            return False

        # Check the file's timestamp
        if mtime != self.getVar("CACHETIMESTAMP", fn, True):
            bb.msg.debug(2, bb.msg.domain.Cache, "Cache: %s changed" % fn)
            self.remove(fn)
            return False

        # Check dependencies are still valid
        depends = self.getVar("__depends", fn, True)
        if depends:
            for f, old_mtime in depends:
                fmtime = bb.parse.cached_mtime_noerror(f)
                # Check if file still exists
                if old_mtime != 0 and fmtime == 0:
                    self.remove(fn)
                    return False

                if (fmtime != old_mtime):
                    bb.msg.debug(2, bb.msg.domain.Cache, "Cache: %s's dependency %s changed" % (fn, f))
                    self.remove(fn)
                    return False

        if not fn in self.clean:
            self.clean[fn] = ""

        # Mark extended class data as clean too
        multi = self.getVar('BBCLASSEXTEND', fn, True)
        for cls in (multi or "").split():
            virtualfn = self.realfn2virtual(fn, cls)
            self.clean[virtualfn] = ""

        return True

    def remove(self, fn):
        """
        Remove a fn from the cache
        Called from the parser in error cases
        """
        bb.msg.debug(1, bb.msg.domain.Cache, "Removing %s from cache" % fn)
        if fn in self.depends_cache:
            del self.depends_cache[fn]
        if fn in self.clean:
            del self.clean[fn]

    def sync(self):
        """
        Save the cache
        Called from the parser when complete (or exiting)
        """
        import copy

        if not self.has_cache:
            return

        if self.cacheclean:
            bb.msg.note(1, bb.msg.domain.Cache, "Cache is clean, not saving.")
            return

        version_data = {}
        version_data['CACHE_VER'] = __cache_version__
        version_data['BITBAKE_VER'] = bb.__version__

        # Work on a deep copy so evicting uncacheable entries doesn't
        # disturb the in-memory cache.
        cache_data = copy.deepcopy(self.depends_cache)
        for fn in self.depends_cache:
            if '__BB_DONT_CACHE' in self.depends_cache[fn] and self.depends_cache[fn]['__BB_DONT_CACHE']:
                bb.msg.debug(2, bb.msg.domain.Cache, "Not caching %s, marked as not cacheable" % fn)
                del cache_data[fn]
            elif 'PV' in self.depends_cache[fn] and 'SRCREVINACTION' in self.depends_cache[fn]['PV']:
                bb.msg.error(bb.msg.domain.Cache, "Not caching %s as it had SRCREVINACTION in PV. Please report this bug" % fn)
                del cache_data[fn]

        # Open/close explicitly rather than leaking the handle; an unclosed
        # write handle risks a truncated cache on interpreter exit.
        cachefile = open(self.cachefile, "wb")
        try:
            p = pickle.Pickler(cachefile, -1)
            p.dump([cache_data, version_data])
        finally:
            cachefile.close()

    def mtime(self, cachefile):
        """Return cachefile's mtime, or 0 if it cannot be statted."""
        return bb.parse.cached_mtime_noerror(cachefile)

    def handle_data(self, file_name, cacheData):
        """
        Save data we need into the cache
        """
        pn = self.getVar('PN', file_name, True)
        pe = self.getVar('PE', file_name, True) or "0"
        pv = self.getVar('PV', file_name, True)
        if 'SRCREVINACTION' in pv:
            # Consistency fix: use bb.msg.note like the rest of this file
            # (was bb.note).
            bb.msg.note(1, bb.msg.domain.Cache, "Found SRCREVINACTION in PV (%s) or %s. Please report this bug." % (pv, file_name))
        pr = self.getVar('PR', file_name, True)
        dp = int(self.getVar('DEFAULT_PREFERENCE', file_name, True) or "0")
        depends = bb.utils.explode_deps(self.getVar("DEPENDS", file_name, True) or "")
        packages = (self.getVar('PACKAGES', file_name, True) or "").split()
        packages_dynamic = (self.getVar('PACKAGES_DYNAMIC', file_name, True) or "").split()
        rprovides = (self.getVar("RPROVIDES", file_name, True) or "").split()

        cacheData.task_deps[file_name] = self.getVar("_task_deps", file_name, True)

        # build PackageName to FileName lookup table
        if pn not in cacheData.pkg_pn:
            cacheData.pkg_pn[pn] = []
        cacheData.pkg_pn[pn].append(file_name)

        cacheData.stamp[file_name] = self.getVar('STAMP', file_name, True)

        # build FileName to PackageName lookup table
        cacheData.pkg_fn[file_name] = pn
        cacheData.pkg_pepvpr[file_name] = (pe, pv, pr)
        cacheData.pkg_dp[file_name] = dp

        provides = [pn]
        for provide in (self.getVar("PROVIDES", file_name, True) or "").split():
            if provide not in provides:
                provides.append(provide)

        # Build forward and reverse provider hashes
        # Forward: virtual -> [filenames]
        # Reverse: PN -> [virtuals]
        if pn not in cacheData.pn_provides:
            cacheData.pn_provides[pn] = []

        cacheData.fn_provides[file_name] = provides
        for provide in provides:
            if provide not in cacheData.providers:
                cacheData.providers[provide] = []
            cacheData.providers[provide].append(file_name)
            if not provide in cacheData.pn_provides[pn]:
                cacheData.pn_provides[pn].append(provide)

        cacheData.deps[file_name] = []
        for dep in depends:
            if not dep in cacheData.deps[file_name]:
                cacheData.deps[file_name].append(dep)
            if not dep in cacheData.all_depends:
                cacheData.all_depends.append(dep)

        # Build reverse hash for PACKAGES, so runtime dependencies
        # can be resolved (RDEPENDS, RRECOMMENDS etc.)
        for package in packages:
            if not package in cacheData.packages:
                cacheData.packages[package] = []
            cacheData.packages[package].append(file_name)
            # Consistency fix: use True for the expand flag (was the bare 1).
            rprovides += (self.getVar("RPROVIDES_%s" % package, file_name, True) or "").split()

        for package in packages_dynamic:
            if not package in cacheData.packages_dynamic:
                cacheData.packages_dynamic[package] = []
            cacheData.packages_dynamic[package].append(file_name)

        for rprovide in rprovides:
            if not rprovide in cacheData.rproviders:
                cacheData.rproviders[rprovide] = []
            cacheData.rproviders[rprovide].append(file_name)

        # Build hash of runtime depends and recommends
        if not file_name in cacheData.rundeps:
            cacheData.rundeps[file_name] = {}
        if not file_name in cacheData.runrecs:
            cacheData.runrecs[file_name] = {}

        rdepends = self.getVar('RDEPENDS', file_name, True) or ""
        rrecommends = self.getVar('RRECOMMENDS', file_name, True) or ""
        for package in packages + [pn]:
            if not package in cacheData.rundeps[file_name]:
                cacheData.rundeps[file_name][package] = []
            if not package in cacheData.runrecs[file_name]:
                cacheData.runrecs[file_name][package] = []

            cacheData.rundeps[file_name][package] = rdepends + " " + (self.getVar("RDEPENDS_%s" % package, file_name, True) or "")
            cacheData.runrecs[file_name][package] = rrecommends + " " + (self.getVar("RRECOMMENDS_%s" % package, file_name, True) or "")

        # Collect files we may need for possible world-dep
        # calculations
        if not self.getVar('BROKEN', file_name, True) and not self.getVar('EXCLUDE_FROM_WORLD', file_name, True):
            cacheData.possible_world.append(file_name)

        # Touch this to make sure its in the cache
        self.getVar('__BB_DONT_CACHE', file_name, True)
        self.getVar('BBCLASSEXTEND', file_name, True)

    def load_bbfile(self, bbfile, config):
        """
        Load and parse one .bb build file
        Return the data and whether parsing resulted in the file being skipped
        """
        import bb
        from bb import utils, data, parse, debug, event, fatal

        # expand tmpdir to include this topdir
        data.setVar('TMPDIR', data.getVar('TMPDIR', config, 1) or "", config)
        bbfile_loc = os.path.abspath(os.path.dirname(bbfile))
        oldpath = os.path.abspath(os.getcwd())
        # Parse from the recipe's directory so relative includes resolve.
        if bb.parse.cached_mtime_noerror(bbfile_loc):
            os.chdir(bbfile_loc)
        bb_data = data.init_db(config)
        try:
            bb_data = parse.handle(bbfile, bb_data)  # read .bb data
            os.chdir(oldpath)
            return bb_data
        except:
            # Always restore the working directory before propagating.
            os.chdir(oldpath)
            raise
471 | |||
def init(cooker):
    """
    Construct and return the cooker's Cache.

    The Objective: Cache the minimum amount of data possible yet get to the
    stage of building packages (i.e. tryBuild) without reparsing any .bb files.

    getVar calls are intercepted so only the variables actually accessed get
    cached; the cache relies on those getVar calls being made for every
    variable bitbake needs to reach that stage.  For each cached file we
    track its mtime, the mtimes of all its dependencies, and whether it
    caused a parse.SkipPackage exception.  Files causing parsing errors are
    evicted from the cache.
    """
    return Cache(cooker)
490 | |||
491 | |||
492 | |||
493 | #============================================================================# | ||
494 | # CacheData | ||
495 | #============================================================================# | ||
class CacheData:
    """
    The aggregated lookup tables compiled from the per-recipe cached data.
    """

    def __init__(self):
        # Direct cache variables, populated by Cache.handle_data.
        self.providers = {}
        self.rproviders = {}
        self.packages = {}
        self.packages_dynamic = {}
        self.possible_world = []
        self.pkg_pn = {}
        self.pkg_fn = {}
        self.pkg_pepvpr = {}
        self.pkg_dp = {}
        self.pn_provides = {}
        self.fn_provides = {}
        self.all_depends = []
        self.deps = {}
        self.rundeps = {}
        self.runrecs = {}
        self.task_queues = {}
        self.task_deps = {}
        self.stamp = {}
        self.preferred = {}

        # Indirect cache variables, filled in elsewhere (e.g. by the cooker).
        self.ignored_dependencies = []
        self.world_target = set()
        self.bbfile_priority = {}
        self.bbfile_config_priorities = []
diff --git a/bitbake-dev/lib/bb/command.py b/bitbake-dev/lib/bb/command.py deleted file mode 100644 index 2bb5365c0c..0000000000 --- a/bitbake-dev/lib/bb/command.py +++ /dev/null | |||
@@ -1,271 +0,0 @@ | |||
1 | """ | ||
2 | BitBake 'Command' module | ||
3 | |||
4 | Provide an interface to interact with the bitbake server through 'commands' | ||
5 | """ | ||
6 | |||
7 | # Copyright (C) 2006-2007 Richard Purdie | ||
8 | # | ||
9 | # This program is free software; you can redistribute it and/or modify | ||
10 | # it under the terms of the GNU General Public License version 2 as | ||
11 | # published by the Free Software Foundation. | ||
12 | # | ||
13 | # This program is distributed in the hope that it will be useful, | ||
14 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | # GNU General Public License for more details. | ||
17 | # | ||
18 | # You should have received a copy of the GNU General Public License along | ||
19 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
20 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
21 | |||
22 | """ | ||
23 | The bitbake server takes 'commands' from its UI/commandline. | ||
24 | Commands are either synchronous or asynchronous. | ||
25 | Async commands return data to the client in the form of events. | ||
26 | Sync commands must only return data through the function return value | ||
27 | and must not trigger events, directly or indirectly. | ||
28 | Commands are queued in a CommandQueue | ||
29 | """ | ||
30 | |||
31 | import bb | ||
32 | |||
# Command-name (lowercased method name) -> unbound method lookup tables,
# populated by Command.__init__.
async_cmds = {}
sync_cmds = {}
35 | |||
class Command:
    """
    A queue of asynchronous commands for bitbake

    Dispatches incoming command lines either to CommandsSync (executed
    immediately) or CommandsAsync (executed one at a time via the server's
    idle loop, reporting results back through events).
    """
    def __init__(self, cooker):

        self.cooker = cooker
        self.cmds_sync = CommandsSync()
        self.cmds_async = CommandsAsync()

        # FIXME Add lock for this
        self.currentAsyncCommand = None

        # Build the name -> method tables; command names are the
        # lowercased method names of the command classes.
        for attr in CommandsSync.__dict__:
            command = attr[:].lower()
            method = getattr(CommandsSync, attr)
            sync_cmds[command] = (method)

        for attr in CommandsAsync.__dict__:
            command = attr[:].lower()
            method = getattr(CommandsAsync, attr)
            async_cmds[command] = (method)

    def runCommand(self, commandline):
        """
        Dispatch one command.  commandline is [name, arg1, arg2, ...].
        Returns the sync command's result, True if an async command was
        queued, or an error string.
        """
        try:
            command = commandline.pop(0)
            if command in CommandsSync.__dict__:
                # Can run synchronous commands straight away
                return getattr(CommandsSync, command)(self.cmds_sync, self, commandline)
            if self.currentAsyncCommand is not None:
                # Only one async command may be in flight at a time.
                return "Busy (%s in progress)" % self.currentAsyncCommand[0]
            if command not in CommandsAsync.__dict__:
                return "No such command"
            self.currentAsyncCommand = (command, commandline)
            self.cooker.server.register_idle_function(self.cooker.runCommands, self.cooker)
            return True
        except:
            import traceback
            return traceback.format_exc()

    def runAsyncCommand(self):
        """
        Run (or continue preparing for) the queued async command.
        Called from the server idle loop; returns True to be called again.
        """
        try:
            if self.currentAsyncCommand is not None:
                (command, options) = self.currentAsyncCommand
                commandmethod = getattr(CommandsAsync, command)
                needcache = getattr( commandmethod, "needcache" )
                if needcache and self.cooker.cookerState != bb.cooker.cookerParsed:
                    # Metadata not parsed yet: make progress on the cache
                    # and ask to be called again before running the command.
                    self.cooker.updateCache()
                    return True
                else:
                    commandmethod(self.cmds_async, self, options)
                    return False
            else:
                return False
        except:
            import traceback
            # Report the failure to the UI rather than crashing the server.
            self.finishAsyncCommand(traceback.format_exc())
            return False

    def finishAsyncCommand(self, error = None):
        """
        Mark the current async command finished, firing a completion or
        failure event, and free the slot for the next command.
        """
        if error:
            bb.event.fire(bb.command.CookerCommandFailed(error), self.cooker.configuration.event_data)
        else:
            bb.event.fire(bb.command.CookerCommandCompleted(), self.cooker.configuration.event_data)
        self.currentAsyncCommand = None
101 | |||
102 | |||
class CommandsSync:
    """
    Synchronous command implementations.

    These run directly inside Command.runCommand, so they must be quick
    (interactive performance) and must not interfere with any asynchronous
    command already in progress.
    """

    def stateShutdown(self, command, params):
        """
        Ask the cooker to enter 'shutdown' mode.
        """
        command.cooker.cookerAction = bb.cooker.cookerShutdown

    def stateStop(self, command, params):
        """
        Ask the cooker to stop.
        """
        command.cooker.cookerAction = bb.cooker.cookerStop

    def getCmdLineAction(self, command, params):
        """
        Return whatever action was parsed from the original command line.
        """
        return command.cooker.commandlineAction

    def getVariable(self, command, params):
        """
        Look up a variable in configuration.data and return its value.
        params: [varname] or [varname, expand]
        """
        if len(params) > 1:
            expand = params[1]
        else:
            expand = True
        return bb.data.getVar(params[0], command.cooker.configuration.data, expand)

    def setVariable(self, command, params):
        """
        Store a value into configuration.data.
        params: [varname, value]
        """
        bb.data.setVar(params[0], params[1], command.cooker.configuration.data)
146 | |||
147 | |||
class CommandsAsync:
    """
    Asynchronous command implementations.

    These communicate their results via generated events; any command that
    requires metadata parsing belongs here.  The 'needcache' attribute on
    each method tells Command.runAsyncCommand whether the metadata cache
    must be populated before the command can run.
    """

    def buildFile(self, command, params):
        """
        Build one specific .bb file.
        params: [bbfile, task]
        """
        command.cooker.buildFile(params[0], params[1])
    buildFile.needcache = False

    def buildTargets(self, command, params):
        """
        Build a set of targets.
        params: [pkgs_to_build, task]
        """
        command.cooker.buildTargets(params[0], params[1])
    buildTargets.needcache = True

    def generateDepTreeEvent(self, command, params):
        """
        Emit an event carrying the dependency information.
        params: [pkgs_to_build, task]
        """
        command.cooker.generateDepTreeEvent(params[0], params[1])
        command.finishAsyncCommand()
    generateDepTreeEvent.needcache = True

    def generateDotGraph(self, command, params):
        """
        Write dependency information to disk as .dot files.
        params: [pkgs_to_build, task]
        """
        command.cooker.generateDotGraphFiles(params[0], params[1])
        command.finishAsyncCommand()
    generateDotGraph.needcache = True

    def showVersions(self, command, params):
        """
        Display the currently selected package versions.
        """
        command.cooker.showVersions()
        command.finishAsyncCommand()
    showVersions.needcache = True

    def showEnvironmentTarget(self, command, params):
        """
        Print the environment of a target recipe
        (needs the cache to work out which recipe to use).
        params: [pkg]
        """
        command.cooker.showEnvironment(None, params[0])
        command.finishAsyncCommand()
    showEnvironmentTarget.needcache = True

    def showEnvironment(self, command, params):
        """
        Print the standard environment, or the environment of the given
        recipe file when one is specified.
        params: [bbfile]
        """
        command.cooker.showEnvironment(params[0])
        command.finishAsyncCommand()
    showEnvironment.needcache = False

    def parseFiles(self, command, params):
        """
        Parse the .bb files.
        """
        command.cooker.updateCache()
        command.finishAsyncCommand()
    parseFiles.needcache = True

    def compareRevisions(self, command, params):
        """
        Compare source revisions against the stored ones.
        """
        command.cooker.compareRevisions()
        command.finishAsyncCommand()
    compareRevisions.needcache = True
242 | |||
243 | # | ||
244 | # Events | ||
245 | # | ||
class CookerCommandCompleted(bb.event.Event):
    """
    Cooker command completed

    Fired by Command.finishAsyncCommand when an async command ends
    without error.
    """
    def __init__(self):
        bb.event.Event.__init__(self)
252 | |||
253 | |||
class CookerCommandFailed(bb.event.Event):
    """
    Cooker command failed

    (Docstring previously said "completed" -- a copy/paste error.)
    Fired by Command.finishAsyncCommand when an async command raised.
    """
    def __init__(self, error):
        bb.event.Event.__init__(self)
        # Traceback/message describing the failure, forwarded to the UI.
        self.error = error
261 | |||
class CookerCommandSetExitCode(bb.event.Event):
    """
    Set the exit code for a cooker command
    """
    def __init__(self, exitcode):
        bb.event.Event.__init__(self)
        # Coerced to a plain int so consumers can pass it to exit() directly.
        self.exitcode = int(exitcode)
269 | |||
270 | |||
271 | |||
diff --git a/bitbake-dev/lib/bb/cooker.py b/bitbake-dev/lib/bb/cooker.py deleted file mode 100644 index 8036d7e9d5..0000000000 --- a/bitbake-dev/lib/bb/cooker.py +++ /dev/null | |||
@@ -1,978 +0,0 @@ | |||
1 | #!/usr/bin/env python | ||
2 | # ex:ts=4:sw=4:sts=4:et | ||
3 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
4 | # | ||
5 | # Copyright (C) 2003, 2004 Chris Larson | ||
6 | # Copyright (C) 2003, 2004 Phil Blundell | ||
7 | # Copyright (C) 2003 - 2005 Michael 'Mickey' Lauer | ||
8 | # Copyright (C) 2005 Holger Hans Peter Freyther | ||
9 | # Copyright (C) 2005 ROAD GmbH | ||
10 | # Copyright (C) 2006 - 2007 Richard Purdie | ||
11 | # | ||
12 | # This program is free software; you can redistribute it and/or modify | ||
13 | # it under the terms of the GNU General Public License version 2 as | ||
14 | # published by the Free Software Foundation. | ||
15 | # | ||
16 | # This program is distributed in the hope that it will be useful, | ||
17 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
19 | # GNU General Public License for more details. | ||
20 | # | ||
21 | # You should have received a copy of the GNU General Public License along | ||
22 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
23 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
24 | |||
25 | import sys, os, getopt, glob, copy, os.path, re, time | ||
26 | import bb | ||
27 | from bb import utils, data, parse, event, cache, providers, taskdata, runqueue | ||
28 | from bb import command | ||
29 | import bb.server.xmlrpc | ||
30 | import itertools, sre_constants | ||
31 | |||
class MultipleMatches(Exception):
    """Raised when a buildfile expression matches more than one .bb file."""
36 | |||
class ParsingErrorsFound(Exception):
    """Raised when errors are encountered while parsing recipe files."""
41 | |||
class NothingToBuild(Exception):
    """Raised when no build targets were supplied."""
46 | |||
47 | |||
# Different states cooker can be in
cookerClean = 1     # nothing parsed yet (or cache invalidated)
cookerParsing = 2   # recipe parsing in progress (updateCache is iterating)
cookerParsed = 3    # all recipes parsed; self.status fully populated

# Different action states the cooker can be in
cookerRun = 1 # Cooker is running normally
cookerShutdown = 2 # Active tasks should be brought to a controlled stop
cookerStop = 3 # Stop, now!
57 | |||
58 | #============================================================================# | ||
59 | # BBCooker | ||
60 | #============================================================================# | ||
61 | class BBCooker: | ||
62 | """ | ||
63 | Manages one bitbake build run | ||
64 | """ | ||
65 | |||
def __init__(self, configuration, server):
    """
    Set up one build run: initialise messaging, parse the base
    configuration, prepare the event datastore and clear the terminal's
    TOSTOP flag so child tasks can write output.
    """
    # Parsed recipe metadata (a bb.cache.CacheData); populated later by
    # updateCache()/buildFile().
    self.status = None

    self.cache = None
    self.bb_cache = None

    # The server wraps this cooker and exposes it to the UIs.
    self.server = server.BitBakeServer(self)

    self.configuration = configuration

    # Propagate verbosity/debug options into the messaging module.
    if self.configuration.verbose:
        bb.msg.set_verbose(True)

    if self.configuration.debug:
        bb.msg.set_debug_level(self.configuration.debug)
    else:
        bb.msg.set_debug_level(0)

    if self.configuration.debug_domains:
        bb.msg.set_debug_domains(self.configuration.debug_domains)

    self.configuration.data = bb.data.init()

    bb.data.inheritFromOS(self.configuration.data)

    # Extra config files given on the command line, then the main config.
    for f in self.configuration.file:
        self.parseConfigurationFile( f )

    self.parseConfigurationFile( os.path.join( "conf", "bitbake.conf" ) )

    # Default task falls back to BB_DEFAULT_TASK, then "build".
    if not self.configuration.cmd:
        self.configuration.cmd = bb.data.getVar("BB_DEFAULT_TASK", self.configuration.data, True) or "build"

    # BBPKGS may supply default targets when none were given explicitly.
    bbpkgs = bb.data.getVar('BBPKGS', self.configuration.data, True)
    if bbpkgs and len(self.configuration.pkgs_to_build) == 0:
        self.configuration.pkgs_to_build.extend(bbpkgs.split())

    #
    # Special updated configuration we use for firing events
    #
    self.configuration.event_data = bb.data.createCopy(self.configuration.data)
    bb.data.update_data(self.configuration.event_data)

    # TOSTOP must not be set or our children will hang when they output
    fd = sys.stdout.fileno()
    if os.isatty(fd):
        import termios
        tcattr = termios.tcgetattr(fd)
        if tcattr[3] & termios.TOSTOP:
            bb.msg.note(1, bb.msg.domain.Build, "The terminal had the TOSTOP bit set, clearing...")
            tcattr[3] = tcattr[3] & ~termios.TOSTOP
            termios.tcsetattr(fd, termios.TCSANOW, tcattr)

    self.command = bb.command.Command(self)
    self.cookerState = cookerClean
    self.cookerAction = cookerRun
122 | |||
def parseConfiguration(self):
    """
    Apply configuration-derived process settings; currently this means
    renicing ourselves when BB_NICE_LEVEL is set.
    """
    requested = bb.data.getVar("BB_NICE_LEVEL", self.configuration.data, True)
    if requested:
        # os.nice(0) returns the current niceness; apply only the delta.
        delta = int(requested) - os.nice(0)
        bb.msg.note(2, bb.msg.domain.Build, "Renice to %s " % os.nice(delta))
132 | |||
def parseCommandLine(self):
    """
    Translate the parsed command-line options into a single queued
    action: self.commandlineAction becomes [command-name, args...] for
    the server to execute, or None (after bb.error) when the requested
    combination of options is invalid.
    """
    # Parse any commandline into actions
    # NOTE: the order of these elif branches defines option precedence —
    # --environment wins over --buildfile, which wins over the rest.
    if self.configuration.show_environment:
        self.commandlineAction = None

        if 'world' in self.configuration.pkgs_to_build:
            bb.error("'world' is not a valid target for --environment.")
        elif len(self.configuration.pkgs_to_build) > 1:
            bb.error("Only one target can be used with the --environment option.")
        elif self.configuration.buildfile and len(self.configuration.pkgs_to_build) > 0:
            bb.error("No target should be used with the --environment and --buildfile options.")
        elif len(self.configuration.pkgs_to_build) > 0:
            self.commandlineAction = ["showEnvironmentTarget", self.configuration.pkgs_to_build]
        else:
            self.commandlineAction = ["showEnvironment", self.configuration.buildfile]
    elif self.configuration.buildfile is not None:
        self.commandlineAction = ["buildFile", self.configuration.buildfile, self.configuration.cmd]
    elif self.configuration.revisions_changed:
        self.commandlineAction = ["compareRevisions"]
    elif self.configuration.show_versions:
        self.commandlineAction = ["showVersions"]
    elif self.configuration.parse_only:
        self.commandlineAction = ["parseFiles"]
    # FIXME - implement
    #elif self.configuration.interactive:
    #    self.interactiveMode()
    elif self.configuration.dot_graph:
        if self.configuration.pkgs_to_build:
            self.commandlineAction = ["generateDotGraph", self.configuration.pkgs_to_build, self.configuration.cmd]
        else:
            self.commandlineAction = None
            bb.error("Please specify a package name for dependency graph generation.")
    else:
        if self.configuration.pkgs_to_build:
            self.commandlineAction = ["buildTargets", self.configuration.pkgs_to_build, self.configuration.cmd]
        else:
            self.commandlineAction = None
            bb.error("Nothing to do. Use 'bitbake world' to build everything, or run 'bitbake --help' for usage information.")
171 | |||
def runCommands(self, server, data, abort):
    """
    Run any queued asynchronous command.

    Invoked from the server's idle handler so the command executes in
    the cooker's own context rather than being tied to any UI.
    """
    return self.command.runAsyncCommand()
180 | |||
def tryBuildPackage(self, fn, item, task, the_data):
    """
    Build one task of a package, optionally build following task depends

    fn       -- recipe filename
    item     -- provider name (not referenced in this body; kept for the
                caller interface)
    task     -- task name without the "do_" prefix
    the_data -- fully loaded datastore for the recipe

    Returns True on success; logs and re-raises build failures.
    """
    try:
        # Honour --dry-run: skip execution but still report success.
        if not self.configuration.dry_run:
            bb.build.exec_task('do_%s' % task, the_data)
        return True
    except bb.build.FuncFailed:
        bb.msg.error(bb.msg.domain.Build, "task stack execution failed")
        raise
    except bb.build.EventException, e:
        # The exception carries the offending event as its second argument.
        event = e.args[1]
        bb.msg.error(bb.msg.domain.Build, "%s event exception, aborting" % bb.event.getName(event))
        raise
196 | |||
def tryBuild(self, fn, task):
    """
    Run 'task' for the recipe file 'fn': load the full datastore for the
    recipe, look up its provider name, and delegate to tryBuildPackage().
    """
    recipe_data = self.bb_cache.loadDataFull(fn, self.configuration.data)
    provider = self.status.pkg_fn[fn]

    #if bb.build.stamp_is_current('do_%s' % self.configuration.cmd, recipe_data):
    #    return True

    return self.tryBuildPackage(fn, provider, task, recipe_data)
212 | |||
def showVersions(self):
    """
    Print a table of every known package name with its latest version
    and, where it differs, the preferred version.
    """
    # Requires parsed recipes.
    self.updateCache()

    pkg_pn = self.status.pkg_pn
    preferred_versions = {}
    latest_versions = {}

    for pn in pkg_pn.keys():
        (last_ver, last_file, pref_ver, pref_file) = bb.providers.findBestProvider(pn, self.configuration.data, self.status)
        preferred_versions[pn] = (pref_ver, pref_file)
        latest_versions[pn] = (last_ver, last_file)

    bb.msg.plain("%-35s %25s %25s" % ("Package Name", "Latest Version", "Preferred Version"))
    bb.msg.plain("%-35s %25s %25s\n" % ("============", "==============", "================="))

    for p in sorted(pkg_pn.keys()):
        pref = preferred_versions[p]
        latest = latest_versions[p]

        lateststr = latest[0][0] + ":" + latest[0][1] + "-" + latest[0][2]
        # Show the preferred column only when it differs from the latest.
        if pref == latest:
            prefstr = ""
        else:
            prefstr = pref[0][0] + ":" + pref[0][1] + '-' + pref[0][2]

        bb.msg.plain("%-35s %25s %25s" % (p, lateststr, prefstr))
245 | |||
def compareRevisions(self):
    """
    Compare stored SCM revisions with current upstream revisions and
    report the result to the UI as an exit-code event.
    """
    changed = bb.fetch.fetcher_compare_revisons(self.configuration.data)
    bb.event.fire(bb.command.CookerCommandSetExitCode(changed), self.configuration.event_data)
249 | |||
def showEnvironment(self, buildfile = None, pkgs_to_build = []):
    """
    Show the outer or per-package environment.

    buildfile     -- if given, show the environment of the matching .bb file
    pkgs_to_build -- if it names exactly one target, show that target's
                     environment; otherwise the global configuration is shown
    """
    # NOTE(review): the mutable default for pkgs_to_build is never
    # mutated in this body (only len() and indexing), so the usual
    # shared-default pitfall does not bite here.
    fn = None
    envdata = None

    if buildfile:
        # Parse the given buildfile directly rather than via the cache.
        self.cb = None
        self.bb_cache = bb.cache.init(self)
        fn = self.matchFile(buildfile)
    elif len(pkgs_to_build) == 1:
        self.updateCache()

        localdata = data.createCopy(self.configuration.data)
        bb.data.update_data(localdata)
        bb.data.expandKeys(localdata)

        # Resolve the target name to the recipe file that provides it.
        taskdata = bb.taskdata.TaskData(self.configuration.abort)
        taskdata.add_provider(localdata, self.status, pkgs_to_build[0])
        taskdata.add_unresolved(localdata, self.status)

        targetid = taskdata.getbuild_id(pkgs_to_build[0])
        fnid = taskdata.build_targets[targetid][0]
        fn = taskdata.fn_index[fnid]
    else:
        # No recipe selected: fall back to the global configuration data.
        envdata = self.configuration.data

    if fn:
        try:
            envdata = self.bb_cache.loadDataFull(fn, self.configuration.data)
        except IOError, e:
            bb.msg.error(bb.msg.domain.Parsing, "Unable to read %s: %s" % (fn, e))
            raise
        except Exception, e:
            bb.msg.error(bb.msg.domain.Parsing, "%s" % e)
            raise

    # Accumulates everything written through write() into one string.
    class dummywrite:
        def __init__(self):
            self.writebuf = ""
        def write(self, output):
            self.writebuf = self.writebuf + output

    # emit variables and shell functions
    try:
        data.update_data(envdata)
        wb = dummywrite()
        data.emit_env(wb, envdata, True)
        bb.msg.plain(wb.writebuf)
    except Exception, e:
        bb.msg.fatal(bb.msg.domain.Parsing, "%s" % e)
    # emit the metadata which isnt valid shell
    data.expandKeys(envdata)
    for e in envdata.keys():
        if data.getVarFlag( e, 'python', envdata ):
            bb.msg.plain("\npython %s () {\n%s}\n" % (e, data.getVar(e, envdata, 1)))
307 | |||
def generateDepTreeData(self, pkgs_to_build, task):
    """
    Create a dependency tree of pkgs_to_build, returning the data.

    The returned dict maps:
      "pn"           -> per-recipe filename/version info
      "depends"      -> build-time depends per pn
      "tdepends"     -> inter-task depends ("pn.taskname" keys)
      "rdepends-pn"  -> runtime depends per pn
      "packages"     -> per-package pn/filename/version info
      "rdepends-pkg" -> runtime depends per package
      "rrecs-pkg"    -> runtime recommends per package
    """

    # Need files parsed
    self.updateCache()

    # If we are told to do the None task then query the default task
    if (task == None):
        task = self.configuration.cmd

    pkgs_to_build = self.checkPackages(pkgs_to_build)

    localdata = data.createCopy(self.configuration.data)
    bb.data.update_data(localdata)
    bb.data.expandKeys(localdata)
    taskdata = bb.taskdata.TaskData(self.configuration.abort)

    runlist = []
    for k in pkgs_to_build:
        taskdata.add_provider(localdata, self.status, k)
        runlist.append([k, "do_%s" % task])
    taskdata.add_unresolved(localdata, self.status)

    # Prepare (but do not execute) a runqueue so task ordering is known.
    rq = bb.runqueue.RunQueue(self, self.configuration.data, self.status, taskdata, runlist)
    rq.prepare_runqueue()

    seen_fnids = []
    depend_tree = {}
    depend_tree["depends"] = {}
    depend_tree["tdepends"] = {}
    depend_tree["pn"] = {}
    depend_tree["rdepends-pn"] = {}
    depend_tree["packages"] = {}
    depend_tree["rdepends-pkg"] = {}
    depend_tree["rrecs-pkg"] = {}

    # NOTE(review): this loop variable shadows the 'task' parameter,
    # which is not needed again after runlist construction above.
    for task in range(len(rq.runq_fnid)):
        taskname = rq.runq_task[task]
        fnid = rq.runq_fnid[task]
        fn = taskdata.fn_index[fnid]
        pn = self.status.pkg_fn[fn]
        version = "%s:%s-%s" % self.status.pkg_pepvpr[fn]
        if pn not in depend_tree["pn"]:
            depend_tree["pn"][pn] = {}
            depend_tree["pn"][pn]["filename"] = fn
            depend_tree["pn"][pn]["version"] = version
        for dep in rq.runq_depends[task]:
            depfn = taskdata.fn_index[rq.runq_fnid[dep]]
            deppn = self.status.pkg_fn[depfn]
            dotname = "%s.%s" % (pn, rq.runq_task[task])
            if not dotname in depend_tree["tdepends"]:
                depend_tree["tdepends"][dotname] = []
            depend_tree["tdepends"][dotname].append("%s.%s" % (deppn, rq.runq_task[dep]))
        # Per-recipe data only needs collecting once per file.
        if fnid not in seen_fnids:
            seen_fnids.append(fnid)
            packages = []

            depend_tree["depends"][pn] = []
            for dep in taskdata.depids[fnid]:
                depend_tree["depends"][pn].append(taskdata.build_names_index[dep])

            depend_tree["rdepends-pn"][pn] = []
            for rdep in taskdata.rdepids[fnid]:
                depend_tree["rdepends-pn"][pn].append(taskdata.run_names_index[rdep])

            rdepends = self.status.rundeps[fn]
            for package in rdepends:
                depend_tree["rdepends-pkg"][package] = []
                for rdepend in rdepends[package]:
                    depend_tree["rdepends-pkg"][package].append(rdepend)
                packages.append(package)

            rrecs = self.status.runrecs[fn]
            for package in rrecs:
                depend_tree["rrecs-pkg"][package] = []
                for rdepend in rrecs[package]:
                    depend_tree["rrecs-pkg"][package].append(rdepend)
                if not package in packages:
                    packages.append(package)

            for package in packages:
                if package not in depend_tree["packages"]:
                    depend_tree["packages"][package] = {}
                    depend_tree["packages"][package]["pn"] = pn
                    depend_tree["packages"][package]["filename"] = fn
                    depend_tree["packages"][package]["version"] = version

    return depend_tree
398 | |||
399 | |||
def generateDepTreeEvent(self, pkgs_to_build, task):
    """
    Build the dependency tree for pkgs_to_build and publish it to the
    UI as a DepTreeGenerated event.
    """
    tree = self.generateDepTreeData(pkgs_to_build, task)
    bb.event.fire(bb.event.DepTreeGenerated(tree), self.configuration.data)
407 | |||
def generateDotGraphFiles(self, pkgs_to_build, task):
    """
    Create a task dependency graph of pkgs_to_build.
    Save the result to a set of .dot files: pn-depends.dot,
    package-depends.dot and task-depends.dot in the current directory.
    """
    # NOTE(review): the three file objects are never explicitly closed;
    # CPython closes them on refcount drop, but an explicit close would
    # be more robust.

    depgraph = self.generateDepTreeData(pkgs_to_build, task)

    # Prints a flattened form of package-depends below where subpackages of a package are merged into the main pn
    depends_file = file('pn-depends.dot', 'w' )
    print >> depends_file, "digraph depends {"
    for pn in depgraph["pn"]:
        fn = depgraph["pn"][pn]["filename"]
        version = depgraph["pn"][pn]["version"]
        print >> depends_file, '"%s" [label="%s %s\\n%s"]' % (pn, pn, version, fn)
    for pn in depgraph["depends"]:
        for depend in depgraph["depends"][pn]:
            print >> depends_file, '"%s" -> "%s"' % (pn, depend)
    for pn in depgraph["rdepends-pn"]:
        # Dashed edges denote runtime (as opposed to build-time) depends.
        for rdepend in depgraph["rdepends-pn"][pn]:
            print >> depends_file, '"%s" -> "%s" [style=dashed]' % (pn, rdepend)
    print >> depends_file, "}"
    bb.msg.plain("PN dependencies saved to 'pn-depends.dot'")

    depends_file = file('package-depends.dot', 'w' )
    print >> depends_file, "digraph depends {"
    for package in depgraph["packages"]:
        pn = depgraph["packages"][package]["pn"]
        fn = depgraph["packages"][package]["filename"]
        version = depgraph["packages"][package]["version"]
        if package == pn:
            print >> depends_file, '"%s" [label="%s %s\\n%s"]' % (pn, pn, version, fn)
        else:
            print >> depends_file, '"%s" [label="%s(%s) %s\\n%s"]' % (package, package, pn, version, fn)
        for depend in depgraph["depends"][pn]:
            print >> depends_file, '"%s" -> "%s"' % (package, depend)
    for package in depgraph["rdepends-pkg"]:
        for rdepend in depgraph["rdepends-pkg"][package]:
            print >> depends_file, '"%s" -> "%s" [style=dashed]' % (package, rdepend)
    for package in depgraph["rrecs-pkg"]:
        for rdepend in depgraph["rrecs-pkg"][package]:
            print >> depends_file, '"%s" -> "%s" [style=dashed]' % (package, rdepend)
    print >> depends_file, "}"
    bb.msg.plain("Package dependencies saved to 'package-depends.dot'")

    tdepends_file = file('task-depends.dot', 'w' )
    print >> tdepends_file, "digraph depends {"
    for task in depgraph["tdepends"]:
        # Keys are "pn.taskname"; split from the right in case pn has dots.
        (pn, taskname) = task.rsplit(".", 1)
        fn = depgraph["pn"][pn]["filename"]
        version = depgraph["pn"][pn]["version"]
        print >> tdepends_file, '"%s.%s" [label="%s %s\\n%s\\n%s"]' % (pn, taskname, pn, taskname, version, fn)
        for dep in depgraph["tdepends"][task]:
            print >> tdepends_file, '"%s" -> "%s"' % (task, dep)
    print >> tdepends_file, "}"
    bb.msg.plain("Task dependencies saved to 'task-depends.dot'")
464 | |||
def buildDepgraph( self ):
    """
    Post-parse provider fixups: record PREFERRED_PROVIDERS entries from
    the configuration into self.status.preferred, and assign each recipe
    file its collection priority (via bbfile_config_priorities).
    """
    # Fixes: removed unused locals (all_depends, pn_provides were
    # assigned but never read) and narrowed a bare 'except:' to the
    # ValueError that p.split(':') unpacking can actually raise.
    localdata = data.createCopy(self.configuration.data)
    bb.data.update_data(localdata)
    bb.data.expandKeys(localdata)

    def calc_bbfile_priority(filename):
        # First matching collection pattern wins; unmatched files get 0.
        for (regex, pri) in self.status.bbfile_config_priorities:
            if regex.match(filename):
                return pri
        return 0

    # Handle PREFERRED_PROVIDERS
    for p in (bb.data.getVar('PREFERRED_PROVIDERS', localdata, 1) or "").split():
        try:
            (providee, provider) = p.split(':')
        except ValueError:
            bb.msg.fatal(bb.msg.domain.Provider, "Malformed option in PREFERRED_PROVIDERS variable: %s" % p)
            continue
        if providee in self.status.preferred and self.status.preferred[providee] != provider:
            bb.msg.error(bb.msg.domain.Provider, "conflicting preferences for %s: both %s and %s specified" % (providee, provider, self.status.preferred[providee]))
        self.status.preferred[providee] = provider

    # Calculate priorities for each file
    for p in self.status.pkg_fn.keys():
        self.status.bbfile_priority[p] = calc_bbfile_priority(p)
493 | |||
def buildWorldTargetList(self):
    """
    Build package list for "bitbake world".

    A recipe joins the world target set unless it provides a virtual/*
    target or shares a PROVIDES entry with another recipe. Populates
    self.status.world_target, then drops the possible_world/all_depends
    references to free memory.
    """
    # Fix: removed the unused local 'all_depends' (assigned, never read).
    pn_provides = self.status.pn_provides
    bb.msg.debug(1, bb.msg.domain.Parsing, "collating packages for \"world\"")
    for f in self.status.possible_world:
        terminal = True
        pn = self.status.pkg_fn[f]

        for p in pn_provides[pn]:
            if p.startswith('virtual/'):
                bb.msg.debug(2, bb.msg.domain.Parsing, "World build skipping %s due to %s provider starting with virtual/" % (f, p))
                terminal = False
                break
            # NOTE(review): this inner break only exits the providers
            # loop; the outer provides loop keeps scanning, which is
            # harmless (terminal stays False) but does extra work.
            for pf in self.status.providers[p]:
                if self.status.pkg_fn[pf] != pn:
                    bb.msg.debug(2, bb.msg.domain.Parsing, "World build skipping %s due to both us and %s providing %s" % (f, pf, p))
                    terminal = False
                    break
        if terminal:
            self.status.world_target.add(pn)

    # drop reference count now
    self.status.possible_world = None
    self.status.all_depends = None
521 | |||
def interactiveMode( self ):
    """Drop off into a shell"""
    # The shell module is optional; fail with a clear message rather
    # than a raw ImportError traceback if it is unavailable.
    try:
        from bb import shell
    except ImportError, details:
        bb.msg.fatal(bb.msg.domain.Parsing, "Sorry, shell not available (%s)" % details )
    else:
        shell.start( self )
530 | |||
def parseConfigurationFile( self, afile ):
    """
    Parse the configuration file 'afile' into self.configuration.data,
    then process INHERIT classes, register any event handlers found,
    initialise the fetchers and fire ConfigParsed. Parse/IO errors are
    fatal.
    """
    try:
        self.configuration.data = bb.parse.handle( afile, self.configuration.data )

        # Handle any INHERITs and inherit the base class
        inherits = ["base"] + (bb.data.getVar('INHERIT', self.configuration.data, True ) or "").split()
        for inherit in inherits:
            self.configuration.data = bb.parse.handle(os.path.join('classes', '%s.bbclass' % inherit), self.configuration.data, True )

        # Nomally we only register event handlers at the end of parsing .bb files
        # We register any handlers we've found so far here...
        for var in data.getVar('__BBHANDLERS', self.configuration.data) or []:
            bb.event.register(var,bb.data.getVar(var, self.configuration.data))

        bb.fetch.fetcher_init(self.configuration.data)

        bb.event.fire(bb.event.ConfigParsed(), self.configuration.data)

    except IOError, e:
        bb.msg.fatal(bb.msg.domain.Parsing, "Error when parsing %s: %s" % (afile, str(e)))
    except bb.parse.ParseError, details:
        bb.msg.fatal(bb.msg.domain.Parsing, "Unable to parse %s (%s)" % (afile, details) )
553 | |||
def handleCollections( self, collections ):
    """
    Handle collections: for each name in the space-separated
    'collections' string, compile BBFILE_PATTERN_<name> and pair it with
    BBFILE_PRIORITY_<name> in self.status.bbfile_config_priorities.
    Misconfigured collections are reported and skipped.
    """
    # Fix: 'is None' instead of '== None' (PEP 8 identity comparison).
    if collections:
        collection_list = collections.split()
        for c in collection_list:
            regex = bb.data.getVar("BBFILE_PATTERN_%s" % c, self.configuration.data, 1)
            if regex is None:
                bb.msg.error(bb.msg.domain.Parsing, "BBFILE_PATTERN_%s not defined" % c)
                continue
            priority = bb.data.getVar("BBFILE_PRIORITY_%s" % c, self.configuration.data, 1)
            if priority is None:
                bb.msg.error(bb.msg.domain.Parsing, "BBFILE_PRIORITY_%s not defined" % c)
                continue
            try:
                cre = re.compile(regex)
            except re.error:
                bb.msg.error(bb.msg.domain.Parsing, "BBFILE_PATTERN_%s \"%s\" is not a valid regular expression" % (c, regex))
                continue
            try:
                pri = int(priority)
                self.status.bbfile_config_priorities.append((cre, pri))
            except ValueError:
                bb.msg.error(bb.msg.domain.Parsing, "invalid value for BBFILE_PRIORITY_%s: \"%s\"" % (c, priority))
577 | |||
def buildSetVars(self):
    """
    Setup any variables needed before starting a build.

    BUILDNAME defaults to a local timestamp (YYYYMMDDHHMM) when not
    already set; BUILDSTART records the build start time in UTC.
    """
    if not bb.data.getVar("BUILDNAME", self.configuration.data):
        # Fix: use time.strftime rather than shelling out via
        # os.popen('date +%Y%m%d%H%M') — same local-time result without
        # spawning a process or leaving the pipe unclosed.
        bb.data.setVar("BUILDNAME", time.strftime('%Y%m%d%H%M'), self.configuration.data)
    bb.data.setVar("BUILDSTART", time.strftime('%m/%d/%Y %H:%M:%S',time.gmtime()), self.configuration.data)
585 | |||
def matchFiles(self, buildfile):
    """
    Find the .bb files which match the expression in 'buildfile'.

    An existing path is returned directly (absolutised). Otherwise
    'buildfile' is treated as a regular expression and searched for
    among the collected, unmasked .bb files.

    Returns a (possibly empty) list of matching filenames.
    """
    # Fix: dropped the dead 'bf = f' rebinding inside the search loop.
    bf = os.path.abspath(buildfile)
    try:
        os.stat(bf)
        return [bf]
    except OSError:
        (filelist, masked) = self.collect_bbfiles()
        regexp = re.compile(buildfile)
        matches = []
        for f in filelist:
            if regexp.search(f) and os.path.isfile(f):
                matches.append(f)
        return matches
604 | |||
def matchFile(self, buildfile):
    """
    Resolve 'buildfile' to exactly one .bb file.

    Raises MultipleMatches (after logging each candidate) unless the
    expression matches exactly one file.
    """
    candidates = self.matchFiles(buildfile)
    if len(candidates) == 1:
        return candidates[0]
    bb.msg.error(bb.msg.domain.Parsing, "Unable to match %s (%s matches found):" % (buildfile, len(candidates)))
    for f in candidates:
        bb.msg.error(bb.msg.domain.Parsing, " %s" % f)
    raise MultipleMatches
617 | |||
def buildFile(self, buildfile, task):
    """
    Build the file matching regexp buildfile.

    The recipe is parsed stand-alone (bypassing the global cache state)
    and its external dependencies stripped, then a single-target
    runqueue is executed via the server idle handler.
    """

    # Parse the configuration here. We need to do it explicitly here since
    # buildFile() doesn't use the cache
    self.parseConfiguration()

    # If we are told to do the None task then query the default task
    if (task == None):
        task = self.configuration.cmd

    fn = self.matchFile(buildfile)
    self.buildSetVars()

    # Load data into the cache for fn and parse the loaded cache data
    self.bb_cache = bb.cache.init(self)
    self.status = bb.cache.CacheData()
    self.bb_cache.loadData(fn, self.configuration.data, self.status)

    # Tweak some variables
    item = self.bb_cache.getVar('PN', fn, True)
    self.status.ignored_dependencies = set()
    self.status.bbfile_priority[fn] = 1

    # Remove external dependencies
    self.status.task_deps[fn]['depends'] = {}
    self.status.deps[fn] = []
    self.status.rundeps[fn] = []
    self.status.runrecs[fn] = []

    # Remove stamp for target if force mode active
    if self.configuration.force:
        bb.msg.note(2, bb.msg.domain.RunQueue, "Remove stamp %s, %s" % (task, fn))
        bb.build.del_stamp('do_%s' % task, self.status, fn)

    # Setup taskdata structure
    taskdata = bb.taskdata.TaskData(self.configuration.abort)
    taskdata.add_provider(self.configuration.data, self.status, item)

    buildname = bb.data.getVar("BUILDNAME", self.configuration.data)
    bb.event.fire(bb.event.BuildStarted(buildname, [item]), self.configuration.event_data)

    # Execute the runqueue
    runlist = [[item, "do_%s" % task]]

    rq = bb.runqueue.RunQueue(self, self.configuration.data, self.status, taskdata, runlist)

    # Idle callback: drives the runqueue a step at a time. Returns 0.5
    # (poll again) while work remains, False once the build completed.
    def buildFileIdle(server, rq, abort):

        if abort or self.cookerAction == cookerStop:
            rq.finish_runqueue(True)
        elif self.cookerAction == cookerShutdown:
            rq.finish_runqueue(False)
        failures = 0
        try:
            retval = rq.execute_runqueue()
        except runqueue.TaskFailure, fnids:
            for fnid in fnids:
                bb.msg.error(bb.msg.domain.Build, "'%s' failed" % taskdata.fn_index[fnid])
                failures = failures + 1
            retval = False
        if not retval:
            self.command.finishAsyncCommand()
            bb.event.fire(bb.event.BuildCompleted(buildname, item, failures), self.configuration.event_data)
            return False
        return 0.5

    self.server.register_idle_function(buildFileIdle, rq)
688 | |||
def buildTargets(self, targets, task):
    """
    Attempt to build the targets specified.

    Resolves providers for each target (expanding 'world'), builds a
    runqueue and registers an idle callback on the server to execute it
    incrementally.
    """

    # Need files parsed
    self.updateCache()

    # If we are told to do the NULL task then query the default task
    if (task == None):
        task = self.configuration.cmd

    targets = self.checkPackages(targets)

    # Idle callback: drives the runqueue a step at a time. Returns 0.5
    # (poll again) while work remains, None once the build completed.
    # NOTE: closes over 'buildname' and 'taskdata', which are assigned
    # below before the callback is first invoked.
    def buildTargetsIdle(server, rq, abort):

        if abort or self.cookerAction == cookerStop:
            rq.finish_runqueue(True)
        elif self.cookerAction == cookerShutdown:
            rq.finish_runqueue(False)
        failures = 0
        try:
            retval = rq.execute_runqueue()
        except runqueue.TaskFailure, fnids:
            for fnid in fnids:
                bb.msg.error(bb.msg.domain.Build, "'%s' failed" % taskdata.fn_index[fnid])
                failures = failures + 1
            retval = False
        if not retval:
            self.command.finishAsyncCommand()
            bb.event.fire(bb.event.BuildCompleted(buildname, targets, failures), self.configuration.event_data)
            return None
        return 0.5

    self.buildSetVars()

    buildname = bb.data.getVar("BUILDNAME", self.configuration.data)
    bb.event.fire(bb.event.BuildStarted(buildname, targets), self.configuration.event_data)

    localdata = data.createCopy(self.configuration.data)
    bb.data.update_data(localdata)
    bb.data.expandKeys(localdata)

    taskdata = bb.taskdata.TaskData(self.configuration.abort)

    runlist = []
    for k in targets:
        taskdata.add_provider(localdata, self.status, k)
        runlist.append([k, "do_%s" % task])
    taskdata.add_unresolved(localdata, self.status)

    rq = bb.runqueue.RunQueue(self, self.configuration.data, self.status, taskdata, runlist)

    self.server.register_idle_function(buildTargetsIdle, rq)
743 | |||
def updateCache(self):
    """
    Incrementally parse all recipe files into self.status.

    Drives the cookerClean -> cookerParsing -> cookerParsed state
    machine: the first call sets up the parser, subsequent calls parse
    one step. Returns True while more parsing remains, None once
    everything is parsed (and immediately when already cookerParsed).
    """

    if self.cookerState == cookerParsed:
        return

    # First call: set up configuration, status and the parser.
    if self.cookerState != cookerParsing:

        self.parseConfiguration ()

        # Import Psyco if available and not disabled
        import platform
        if platform.machine() in ['i386', 'i486', 'i586', 'i686']:
            if not self.configuration.disable_psyco:
                try:
                    import psyco
                except ImportError:
                    bb.msg.note(1, bb.msg.domain.Collection, "Psyco JIT Compiler (http://psyco.sf.net) not available. Install it to increase performance.")
                else:
                    psyco.bind( CookerParser.parse_next )
            else:
                bb.msg.note(1, bb.msg.domain.Collection, "You have disabled Psyco. This decreases performance.")

        self.status = bb.cache.CacheData()

        # ASSUME_PROVIDED plus any extras from the command line are
        # dependencies we never try to resolve.
        ignore = bb.data.getVar("ASSUME_PROVIDED", self.configuration.data, 1) or ""
        self.status.ignored_dependencies = set(ignore.split())

        for dep in self.configuration.extra_assume_provided:
            self.status.ignored_dependencies.add(dep)

        self.handleCollections( bb.data.getVar("BBFILE_COLLECTIONS", self.configuration.data, 1) )

        bb.msg.debug(1, bb.msg.domain.Collection, "collecting .bb files")
        (filelist, masked) = self.collect_bbfiles()
        bb.data.renameVar("__depends", "__base_depends", self.configuration.data)

        self.parser = CookerParser(self, filelist, masked)
        self.cookerState = cookerParsing

    # Parse one step; when the parser reports completion, finalise.
    if not self.parser.parse_next():
        bb.msg.debug(1, bb.msg.domain.Collection, "parsing complete")
        self.buildDepgraph()
        self.cookerState = cookerParsed
        return None

    return True
790 | |||
791 | def checkPackages(self, pkgs_to_build): | ||
792 | |||
793 | if len(pkgs_to_build) == 0: | ||
794 | raise NothingToBuild | ||
795 | |||
796 | if 'world' in pkgs_to_build: | ||
797 | self.buildWorldTargetList() | ||
798 | pkgs_to_build.remove('world') | ||
799 | for t in self.status.world_target: | ||
800 | pkgs_to_build.append(t) | ||
801 | |||
802 | return pkgs_to_build | ||
803 | |||
804 | def get_bbfiles( self, path = os.getcwd() ): | ||
805 | """Get list of default .bb files by reading out the current directory""" | ||
806 | contents = os.listdir(path) | ||
807 | bbfiles = [] | ||
808 | for f in contents: | ||
809 | (root, ext) = os.path.splitext(f) | ||
810 | if ext == ".bb": | ||
811 | bbfiles.append(os.path.abspath(os.path.join(os.getcwd(),f))) | ||
812 | return bbfiles | ||
813 | |||
814 | def find_bbfiles( self, path ): | ||
815 | """Find all the .bb files in a directory""" | ||
816 | from os.path import join | ||
817 | |||
818 | found = [] | ||
819 | for dir, dirs, files in os.walk(path): | ||
820 | for ignored in ('SCCS', 'CVS', '.svn'): | ||
821 | if ignored in dirs: | ||
822 | dirs.remove(ignored) | ||
823 | found += [join(dir,f) for f in files if f.endswith('.bb')] | ||
824 | |||
825 | return found | ||
826 | |||
827 | def collect_bbfiles( self ): | ||
828 | """Collect all available .bb build files""" | ||
829 | parsed, cached, skipped, masked = 0, 0, 0, 0 | ||
830 | self.bb_cache = bb.cache.init(self) | ||
831 | |||
832 | files = (data.getVar( "BBFILES", self.configuration.data, 1 ) or "").split() | ||
833 | data.setVar("BBFILES", " ".join(files), self.configuration.data) | ||
834 | |||
835 | if not len(files): | ||
836 | files = self.get_bbfiles() | ||
837 | |||
838 | if not len(files): | ||
839 | bb.msg.error(bb.msg.domain.Collection, "no files to build.") | ||
840 | |||
841 | newfiles = [] | ||
842 | for f in files: | ||
843 | if os.path.isdir(f): | ||
844 | dirfiles = self.find_bbfiles(f) | ||
845 | if dirfiles: | ||
846 | newfiles += dirfiles | ||
847 | continue | ||
848 | else: | ||
849 | globbed = glob.glob(f) | ||
850 | if not globbed and os.path.exists(f): | ||
851 | globbed = [f] | ||
852 | newfiles += globbed | ||
853 | |||
854 | bbmask = bb.data.getVar('BBMASK', self.configuration.data, 1) | ||
855 | |||
856 | if not bbmask: | ||
857 | return (newfiles, 0) | ||
858 | |||
859 | try: | ||
860 | bbmask_compiled = re.compile(bbmask) | ||
861 | except sre_constants.error: | ||
862 | bb.msg.fatal(bb.msg.domain.Collection, "BBMASK is not a valid regular expression.") | ||
863 | |||
864 | finalfiles = [] | ||
865 | for f in newfiles: | ||
866 | if bbmask_compiled.search(f): | ||
867 | bb.msg.debug(1, bb.msg.domain.Collection, "skipping masked file %s" % f) | ||
868 | masked += 1 | ||
869 | continue | ||
870 | finalfiles.append(f) | ||
871 | |||
872 | return (finalfiles, masked) | ||
873 | |||
874 | def serve(self): | ||
875 | |||
876 | # Empty the environment. The environment will be populated as | ||
877 | # necessary from the data store. | ||
878 | bb.utils.empty_environment() | ||
879 | |||
880 | if self.configuration.profile: | ||
881 | try: | ||
882 | import cProfile as profile | ||
883 | except: | ||
884 | import profile | ||
885 | |||
886 | profile.runctx("self.server.serve_forever()", globals(), locals(), "profile.log") | ||
887 | |||
888 | # Redirect stdout to capture profile information | ||
889 | pout = open('profile.log.processed', 'w') | ||
890 | so = sys.stdout.fileno() | ||
891 | os.dup2(pout.fileno(), so) | ||
892 | |||
893 | import pstats | ||
894 | p = pstats.Stats('profile.log') | ||
895 | p.sort_stats('time') | ||
896 | p.print_stats() | ||
897 | p.print_callers() | ||
898 | p.sort_stats('cumulative') | ||
899 | p.print_stats() | ||
900 | |||
901 | os.dup2(so, pout.fileno()) | ||
902 | pout.flush() | ||
903 | pout.close() | ||
904 | else: | ||
905 | self.server.serve_forever() | ||
906 | |||
907 | bb.event.fire(CookerExit(), self.configuration.event_data) | ||
908 | |||
909 | class CookerExit(bb.event.Event): | ||
910 | """ | ||
911 | Notify clients of the Cooker shutdown | ||
912 | """ | ||
913 | |||
914 | def __init__(self): | ||
915 | bb.event.Event.__init__(self) | ||
916 | |||
917 | class CookerParser: | ||
918 | def __init__(self, cooker, filelist, masked): | ||
919 | # Internal data | ||
920 | self.filelist = filelist | ||
921 | self.cooker = cooker | ||
922 | |||
923 | # Accounting statistics | ||
924 | self.parsed = 0 | ||
925 | self.cached = 0 | ||
926 | self.error = 0 | ||
927 | self.masked = masked | ||
928 | self.total = len(filelist) | ||
929 | |||
930 | self.skipped = 0 | ||
931 | self.virtuals = 0 | ||
932 | |||
933 | # Pointer to the next file to parse | ||
934 | self.pointer = 0 | ||
935 | |||
936 | def parse_next(self): | ||
937 | if self.pointer < len(self.filelist): | ||
938 | f = self.filelist[self.pointer] | ||
939 | cooker = self.cooker | ||
940 | |||
941 | try: | ||
942 | fromCache, skipped, virtuals = cooker.bb_cache.loadData(f, cooker.configuration.data, cooker.status) | ||
943 | if fromCache: | ||
944 | self.cached += 1 | ||
945 | else: | ||
946 | self.parsed += 1 | ||
947 | |||
948 | self.skipped += skipped | ||
949 | self.virtuals += virtuals | ||
950 | |||
951 | except IOError, e: | ||
952 | self.error += 1 | ||
953 | cooker.bb_cache.remove(f) | ||
954 | bb.msg.error(bb.msg.domain.Collection, "opening %s: %s" % (f, e)) | ||
955 | pass | ||
956 | except KeyboardInterrupt: | ||
957 | cooker.bb_cache.remove(f) | ||
958 | cooker.bb_cache.sync() | ||
959 | raise | ||
960 | except Exception, e: | ||
961 | self.error += 1 | ||
962 | cooker.bb_cache.remove(f) | ||
963 | bb.msg.error(bb.msg.domain.Collection, "%s while parsing %s" % (e, f)) | ||
964 | except: | ||
965 | cooker.bb_cache.remove(f) | ||
966 | raise | ||
967 | finally: | ||
968 | bb.event.fire(bb.event.ParseProgress(self.cached, self.parsed, self.skipped, self.masked, self.virtuals, self.error, self.total), cooker.configuration.event_data) | ||
969 | |||
970 | self.pointer += 1 | ||
971 | |||
972 | if self.pointer >= self.total: | ||
973 | cooker.bb_cache.sync() | ||
974 | if self.error > 0: | ||
975 | raise ParsingErrorsFound | ||
976 | return False | ||
977 | return True | ||
978 | |||
diff --git a/bitbake-dev/lib/bb/daemonize.py b/bitbake-dev/lib/bb/daemonize.py deleted file mode 100644 index 1a8bb379f4..0000000000 --- a/bitbake-dev/lib/bb/daemonize.py +++ /dev/null | |||
@@ -1,191 +0,0 @@ | |||
1 | """ | ||
2 | Python Deamonizing helper | ||
3 | |||
4 | Configurable daemon behaviors: | ||
5 | |||
6 | 1.) The current working directory set to the "/" directory. | ||
7 | 2.) The current file creation mode mask set to 0. | ||
8 | 3.) Close all open files (1024). | ||
9 | 4.) Redirect standard I/O streams to "/dev/null". | ||
10 | |||
11 | A failed call to fork() now raises an exception. | ||
12 | |||
13 | References: | ||
14 | 1) Advanced Programming in the Unix Environment: W. Richard Stevens | ||
15 | 2) Unix Programming Frequently Asked Questions: | ||
16 | http://www.erlenstar.demon.co.uk/unix/faq_toc.html | ||
17 | |||
18 | Modified to allow a function to be daemonized and return for | ||
19 | bitbake use by Richard Purdie | ||
20 | """ | ||
21 | |||
22 | __author__ = "Chad J. Schroeder" | ||
23 | __copyright__ = "Copyright (C) 2005 Chad J. Schroeder" | ||
24 | __version__ = "0.2" | ||
25 | |||
26 | # Standard Python modules. | ||
27 | import os # Miscellaneous OS interfaces. | ||
28 | import sys # System-specific parameters and functions. | ||
29 | |||
30 | # Default daemon parameters. | ||
31 | # File mode creation mask of the daemon. | ||
32 | # For BitBake's children, we do want to inherit the parent umask. | ||
33 | UMASK = None | ||
34 | |||
35 | # Default maximum for the number of available file descriptors. | ||
36 | MAXFD = 1024 | ||
37 | |||
38 | # The standard I/O file descriptors are redirected to /dev/null by default. | ||
39 | if (hasattr(os, "devnull")): | ||
40 | REDIRECT_TO = os.devnull | ||
41 | else: | ||
42 | REDIRECT_TO = "/dev/null" | ||
43 | |||
44 | def createDaemon(function, logfile): | ||
45 | """ | ||
46 | Detach a process from the controlling terminal and run it in the | ||
47 | background as a daemon, returning control to the caller. | ||
48 | """ | ||
49 | |||
50 | try: | ||
51 | # Fork a child process so the parent can exit. This returns control to | ||
52 | # the command-line or shell. It also guarantees that the child will not | ||
53 | # be a process group leader, since the child receives a new process ID | ||
54 | # and inherits the parent's process group ID. This step is required | ||
55 | # to insure that the next call to os.setsid is successful. | ||
56 | pid = os.fork() | ||
57 | except OSError, e: | ||
58 | raise Exception, "%s [%d]" % (e.strerror, e.errno) | ||
59 | |||
60 | if (pid == 0): # The first child. | ||
61 | # To become the session leader of this new session and the process group | ||
62 | # leader of the new process group, we call os.setsid(). The process is | ||
63 | # also guaranteed not to have a controlling terminal. | ||
64 | os.setsid() | ||
65 | |||
66 | # Is ignoring SIGHUP necessary? | ||
67 | # | ||
68 | # It's often suggested that the SIGHUP signal should be ignored before | ||
69 | # the second fork to avoid premature termination of the process. The | ||
70 | # reason is that when the first child terminates, all processes, e.g. | ||
71 | # the second child, in the orphaned group will be sent a SIGHUP. | ||
72 | # | ||
73 | # "However, as part of the session management system, there are exactly | ||
74 | # two cases where SIGHUP is sent on the death of a process: | ||
75 | # | ||
76 | # 1) When the process that dies is the session leader of a session that | ||
77 | # is attached to a terminal device, SIGHUP is sent to all processes | ||
78 | # in the foreground process group of that terminal device. | ||
79 | # 2) When the death of a process causes a process group to become | ||
80 | # orphaned, and one or more processes in the orphaned group are | ||
81 | # stopped, then SIGHUP and SIGCONT are sent to all members of the | ||
82 | # orphaned group." [2] | ||
83 | # | ||
84 | # The first case can be ignored since the child is guaranteed not to have | ||
85 | # a controlling terminal. The second case isn't so easy to dismiss. | ||
86 | # The process group is orphaned when the first child terminates and | ||
87 | # POSIX.1 requires that every STOPPED process in an orphaned process | ||
88 | # group be sent a SIGHUP signal followed by a SIGCONT signal. Since the | ||
89 | # second child is not STOPPED though, we can safely forego ignoring the | ||
90 | # SIGHUP signal. In any case, there are no ill-effects if it is ignored. | ||
91 | # | ||
92 | # import signal # Set handlers for asynchronous events. | ||
93 | # signal.signal(signal.SIGHUP, signal.SIG_IGN) | ||
94 | |||
95 | try: | ||
96 | # Fork a second child and exit immediately to prevent zombies. This | ||
97 | # causes the second child process to be orphaned, making the init | ||
98 | # process responsible for its cleanup. And, since the first child is | ||
99 | # a session leader without a controlling terminal, it's possible for | ||
100 | # it to acquire one by opening a terminal in the future (System V- | ||
101 | # based systems). This second fork guarantees that the child is no | ||
102 | # longer a session leader, preventing the daemon from ever acquiring | ||
103 | # a controlling terminal. | ||
104 | pid = os.fork() # Fork a second child. | ||
105 | except OSError, e: | ||
106 | raise Exception, "%s [%d]" % (e.strerror, e.errno) | ||
107 | |||
108 | if (pid == 0): # The second child. | ||
109 | # We probably don't want the file mode creation mask inherited from | ||
110 | # the parent, so we give the child complete control over permissions. | ||
111 | if UMASK is not None: | ||
112 | os.umask(UMASK) | ||
113 | else: | ||
114 | # Parent (the first child) of the second child. | ||
115 | os._exit(0) | ||
116 | else: | ||
117 | # exit() or _exit()? | ||
118 | # _exit is like exit(), but it doesn't call any functions registered | ||
119 | # with atexit (and on_exit) or any registered signal handlers. It also | ||
120 | # closes any open file descriptors. Using exit() may cause all stdio | ||
121 | # streams to be flushed twice and any temporary files may be unexpectedly | ||
122 | # removed. It's therefore recommended that child branches of a fork() | ||
123 | # and the parent branch(es) of a daemon use _exit(). | ||
124 | return | ||
125 | |||
126 | # Close all open file descriptors. This prevents the child from keeping | ||
127 | # open any file descriptors inherited from the parent. There is a variety | ||
128 | # of methods to accomplish this task. Three are listed below. | ||
129 | # | ||
130 | # Try the system configuration variable, SC_OPEN_MAX, to obtain the maximum | ||
131 | # number of open file descriptors to close. If it doesn't exists, use | ||
132 | # the default value (configurable). | ||
133 | # | ||
134 | # try: | ||
135 | # maxfd = os.sysconf("SC_OPEN_MAX") | ||
136 | # except (AttributeError, ValueError): | ||
137 | # maxfd = MAXFD | ||
138 | # | ||
139 | # OR | ||
140 | # | ||
141 | # if (os.sysconf_names.has_key("SC_OPEN_MAX")): | ||
142 | # maxfd = os.sysconf("SC_OPEN_MAX") | ||
143 | # else: | ||
144 | # maxfd = MAXFD | ||
145 | # | ||
146 | # OR | ||
147 | # | ||
148 | # Use the getrlimit method to retrieve the maximum file descriptor number | ||
149 | # that can be opened by this process. If there is not limit on the | ||
150 | # resource, use the default value. | ||
151 | # | ||
152 | import resource # Resource usage information. | ||
153 | maxfd = resource.getrlimit(resource.RLIMIT_NOFILE)[1] | ||
154 | if (maxfd == resource.RLIM_INFINITY): | ||
155 | maxfd = MAXFD | ||
156 | |||
157 | # Iterate through and close all file descriptors. | ||
158 | # for fd in range(0, maxfd): | ||
159 | # try: | ||
160 | # os.close(fd) | ||
161 | # except OSError: # ERROR, fd wasn't open to begin with (ignored) | ||
162 | # pass | ||
163 | |||
164 | # Redirect the standard I/O file descriptors to the specified file. Since | ||
165 | # the daemon has no controlling terminal, most daemons redirect stdin, | ||
166 | # stdout, and stderr to /dev/null. This is done to prevent side-effects | ||
167 | # from reads and writes to the standard I/O file descriptors. | ||
168 | |||
169 | # This call to open is guaranteed to return the lowest file descriptor, | ||
170 | # which will be 0 (stdin), since it was closed above. | ||
171 | # os.open(REDIRECT_TO, os.O_RDWR) # standard input (0) | ||
172 | |||
173 | # Duplicate standard input to standard output and standard error. | ||
174 | # os.dup2(0, 1) # standard output (1) | ||
175 | # os.dup2(0, 2) # standard error (2) | ||
176 | |||
177 | |||
178 | si = file('/dev/null', 'r') | ||
179 | so = file(logfile, 'w') | ||
180 | se = so | ||
181 | |||
182 | |||
183 | # Replace those fds with our own | ||
184 | os.dup2(si.fileno(), sys.stdin.fileno()) | ||
185 | os.dup2(so.fileno(), sys.stdout.fileno()) | ||
186 | os.dup2(se.fileno(), sys.stderr.fileno()) | ||
187 | |||
188 | function() | ||
189 | |||
190 | os._exit(0) | ||
191 | |||
diff --git a/bitbake-dev/lib/bb/data.py b/bitbake-dev/lib/bb/data.py deleted file mode 100644 index d3058b9a1d..0000000000 --- a/bitbake-dev/lib/bb/data.py +++ /dev/null | |||
@@ -1,562 +0,0 @@ | |||
1 | # ex:ts=4:sw=4:sts=4:et | ||
2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
3 | """ | ||
4 | BitBake 'Data' implementations | ||
5 | |||
6 | Functions for interacting with the data structure used by the | ||
7 | BitBake build tools. | ||
8 | |||
9 | The expandData and update_data are the most expensive | ||
10 | operations. At night the cookie monster came by and | ||
11 | suggested 'give me cookies on setting the variables and | ||
12 | things will work out'. Taking this suggestion into account | ||
13 | applying the skills from the not yet passed 'Entwurf und | ||
14 | Analyse von Algorithmen' lecture and the cookie | ||
15 | monster seems to be right. We will track setVar more carefully | ||
16 | to have faster update_data and expandKeys operations. | ||
17 | |||
18 | This is a treade-off between speed and memory again but | ||
19 | the speed is more critical here. | ||
20 | """ | ||
21 | |||
22 | # Copyright (C) 2003, 2004 Chris Larson | ||
23 | # Copyright (C) 2005 Holger Hans Peter Freyther | ||
24 | # | ||
25 | # This program is free software; you can redistribute it and/or modify | ||
26 | # it under the terms of the GNU General Public License version 2 as | ||
27 | # published by the Free Software Foundation. | ||
28 | # | ||
29 | # This program is distributed in the hope that it will be useful, | ||
30 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
31 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
32 | # GNU General Public License for more details. | ||
33 | # | ||
34 | # You should have received a copy of the GNU General Public License along | ||
35 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
36 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
37 | # | ||
38 | #Based on functions from the base bb module, Copyright 2003 Holger Schurig | ||
39 | |||
40 | import sys, os, re, types | ||
41 | if sys.argv[0][-5:] == "pydoc": | ||
42 | path = os.path.dirname(os.path.dirname(sys.argv[1])) | ||
43 | else: | ||
44 | path = os.path.dirname(os.path.dirname(sys.argv[0])) | ||
45 | sys.path.insert(0,path) | ||
46 | |||
47 | from bb import data_smart | ||
48 | import bb | ||
49 | |||
50 | _dict_type = data_smart.DataSmart | ||
51 | |||
52 | def init(): | ||
53 | return _dict_type() | ||
54 | |||
55 | def init_db(parent = None): | ||
56 | if parent: | ||
57 | return parent.createCopy() | ||
58 | else: | ||
59 | return _dict_type() | ||
60 | |||
61 | def createCopy(source): | ||
62 | """Link the source set to the destination | ||
63 | If one does not find the value in the destination set, | ||
64 | search will go on to the source set to get the value. | ||
65 | Value from source are copy-on-write. i.e. any try to | ||
66 | modify one of them will end up putting the modified value | ||
67 | in the destination set. | ||
68 | """ | ||
69 | return source.createCopy() | ||
70 | |||
71 | def initVar(var, d): | ||
72 | """Non-destructive var init for data structure""" | ||
73 | d.initVar(var) | ||
74 | |||
75 | |||
76 | def setVar(var, value, d): | ||
77 | """Set a variable to a given value | ||
78 | |||
79 | Example: | ||
80 | >>> d = init() | ||
81 | >>> setVar('TEST', 'testcontents', d) | ||
82 | >>> print getVar('TEST', d) | ||
83 | testcontents | ||
84 | """ | ||
85 | d.setVar(var,value) | ||
86 | |||
87 | |||
88 | def getVar(var, d, exp = 0): | ||
89 | """Gets the value of a variable | ||
90 | |||
91 | Example: | ||
92 | >>> d = init() | ||
93 | >>> setVar('TEST', 'testcontents', d) | ||
94 | >>> print getVar('TEST', d) | ||
95 | testcontents | ||
96 | """ | ||
97 | return d.getVar(var,exp) | ||
98 | |||
99 | |||
100 | def renameVar(key, newkey, d): | ||
101 | """Renames a variable from key to newkey | ||
102 | |||
103 | Example: | ||
104 | >>> d = init() | ||
105 | >>> setVar('TEST', 'testcontents', d) | ||
106 | >>> renameVar('TEST', 'TEST2', d) | ||
107 | >>> print getVar('TEST2', d) | ||
108 | testcontents | ||
109 | """ | ||
110 | d.renameVar(key, newkey) | ||
111 | |||
112 | def delVar(var, d): | ||
113 | """Removes a variable from the data set | ||
114 | |||
115 | Example: | ||
116 | >>> d = init() | ||
117 | >>> setVar('TEST', 'testcontents', d) | ||
118 | >>> print getVar('TEST', d) | ||
119 | testcontents | ||
120 | >>> delVar('TEST', d) | ||
121 | >>> print getVar('TEST', d) | ||
122 | None | ||
123 | """ | ||
124 | d.delVar(var) | ||
125 | |||
126 | def setVarFlag(var, flag, flagvalue, d): | ||
127 | """Set a flag for a given variable to a given value | ||
128 | |||
129 | Example: | ||
130 | >>> d = init() | ||
131 | >>> setVarFlag('TEST', 'python', 1, d) | ||
132 | >>> print getVarFlag('TEST', 'python', d) | ||
133 | 1 | ||
134 | """ | ||
135 | d.setVarFlag(var,flag,flagvalue) | ||
136 | |||
137 | def getVarFlag(var, flag, d): | ||
138 | """Gets given flag from given var | ||
139 | |||
140 | Example: | ||
141 | >>> d = init() | ||
142 | >>> setVarFlag('TEST', 'python', 1, d) | ||
143 | >>> print getVarFlag('TEST', 'python', d) | ||
144 | 1 | ||
145 | """ | ||
146 | return d.getVarFlag(var,flag) | ||
147 | |||
148 | def delVarFlag(var, flag, d): | ||
149 | """Removes a given flag from the variable's flags | ||
150 | |||
151 | Example: | ||
152 | >>> d = init() | ||
153 | >>> setVarFlag('TEST', 'testflag', 1, d) | ||
154 | >>> print getVarFlag('TEST', 'testflag', d) | ||
155 | 1 | ||
156 | >>> delVarFlag('TEST', 'testflag', d) | ||
157 | >>> print getVarFlag('TEST', 'testflag', d) | ||
158 | None | ||
159 | |||
160 | """ | ||
161 | d.delVarFlag(var,flag) | ||
162 | |||
163 | def setVarFlags(var, flags, d): | ||
164 | """Set the flags for a given variable | ||
165 | |||
166 | Note: | ||
167 | setVarFlags will not clear previous | ||
168 | flags. Think of this method as | ||
169 | addVarFlags | ||
170 | |||
171 | Example: | ||
172 | >>> d = init() | ||
173 | >>> myflags = {} | ||
174 | >>> myflags['test'] = 'blah' | ||
175 | >>> setVarFlags('TEST', myflags, d) | ||
176 | >>> print getVarFlag('TEST', 'test', d) | ||
177 | blah | ||
178 | """ | ||
179 | d.setVarFlags(var,flags) | ||
180 | |||
181 | def getVarFlags(var, d): | ||
182 | """Gets a variable's flags | ||
183 | |||
184 | Example: | ||
185 | >>> d = init() | ||
186 | >>> setVarFlag('TEST', 'test', 'blah', d) | ||
187 | >>> print getVarFlags('TEST', d)['test'] | ||
188 | blah | ||
189 | """ | ||
190 | return d.getVarFlags(var) | ||
191 | |||
192 | def delVarFlags(var, d): | ||
193 | """Removes a variable's flags | ||
194 | |||
195 | Example: | ||
196 | >>> data = init() | ||
197 | >>> setVarFlag('TEST', 'testflag', 1, data) | ||
198 | >>> print getVarFlag('TEST', 'testflag', data) | ||
199 | 1 | ||
200 | >>> delVarFlags('TEST', data) | ||
201 | >>> print getVarFlags('TEST', data) | ||
202 | None | ||
203 | |||
204 | """ | ||
205 | d.delVarFlags(var) | ||
206 | |||
207 | def keys(d): | ||
208 | """Return a list of keys in d | ||
209 | |||
210 | Example: | ||
211 | >>> d = init() | ||
212 | >>> setVar('TEST', 1, d) | ||
213 | >>> setVar('MOO' , 2, d) | ||
214 | >>> setVarFlag('TEST', 'test', 1, d) | ||
215 | >>> keys(d) | ||
216 | ['TEST', 'MOO'] | ||
217 | """ | ||
218 | return d.keys() | ||
219 | |||
220 | def getData(d): | ||
221 | """Returns the data object used""" | ||
222 | return d | ||
223 | |||
224 | def setData(newData, d): | ||
225 | """Sets the data object to the supplied value""" | ||
226 | d = newData | ||
227 | |||
228 | |||
229 | ## | ||
230 | ## Cookie Monsters' query functions | ||
231 | ## | ||
232 | def _get_override_vars(d, override): | ||
233 | """ | ||
234 | Internal!!! | ||
235 | |||
236 | Get the Names of Variables that have a specific | ||
237 | override. This function returns a iterable | ||
238 | Set or an empty list | ||
239 | """ | ||
240 | return [] | ||
241 | |||
242 | def _get_var_flags_triple(d): | ||
243 | """ | ||
244 | Internal!!! | ||
245 | |||
246 | """ | ||
247 | return [] | ||
248 | |||
249 | __expand_var_regexp__ = re.compile(r"\${[^{}]+}") | ||
250 | __expand_python_regexp__ = re.compile(r"\${@.+?}") | ||
251 | |||
252 | def expand(s, d, varname = None): | ||
253 | """Variable expansion using the data store. | ||
254 | |||
255 | Example: | ||
256 | Standard expansion: | ||
257 | >>> d = init() | ||
258 | >>> setVar('A', 'sshd', d) | ||
259 | >>> print expand('/usr/bin/${A}', d) | ||
260 | /usr/bin/sshd | ||
261 | |||
262 | Python expansion: | ||
263 | >>> d = init() | ||
264 | >>> print expand('result: ${@37 * 72}', d) | ||
265 | result: 2664 | ||
266 | |||
267 | Shell expansion: | ||
268 | >>> d = init() | ||
269 | >>> print expand('${TARGET_MOO}', d) | ||
270 | ${TARGET_MOO} | ||
271 | >>> setVar('TARGET_MOO', 'yupp', d) | ||
272 | >>> print expand('${TARGET_MOO}',d) | ||
273 | yupp | ||
274 | >>> setVar('SRC_URI', 'http://somebug.${TARGET_MOO}', d) | ||
275 | >>> delVar('TARGET_MOO', d) | ||
276 | >>> print expand('${SRC_URI}', d) | ||
277 | http://somebug.${TARGET_MOO} | ||
278 | """ | ||
279 | return d.expand(s, varname) | ||
280 | |||
281 | def expandKeys(alterdata, readdata = None): | ||
282 | if readdata == None: | ||
283 | readdata = alterdata | ||
284 | |||
285 | todolist = {} | ||
286 | for key in keys(alterdata): | ||
287 | if not '${' in key: | ||
288 | continue | ||
289 | |||
290 | ekey = expand(key, readdata) | ||
291 | if key == ekey: | ||
292 | continue | ||
293 | todolist[key] = ekey | ||
294 | |||
295 | # These two for loops are split for performance to maximise the | ||
296 | # usefulness of the expand cache | ||
297 | |||
298 | for key in todolist: | ||
299 | ekey = todolist[key] | ||
300 | renameVar(key, ekey, alterdata) | ||
301 | |||
302 | def expandData(alterdata, readdata = None): | ||
303 | """For each variable in alterdata, expand it, and update the var contents. | ||
304 | Replacements use data from readdata. | ||
305 | |||
306 | Example: | ||
307 | >>> a=init() | ||
308 | >>> b=init() | ||
309 | >>> setVar("dlmsg", "dl_dir is ${DL_DIR}", a) | ||
310 | >>> setVar("DL_DIR", "/path/to/whatever", b) | ||
311 | >>> expandData(a, b) | ||
312 | >>> print getVar("dlmsg", a) | ||
313 | dl_dir is /path/to/whatever | ||
314 | """ | ||
315 | if readdata == None: | ||
316 | readdata = alterdata | ||
317 | |||
318 | for key in keys(alterdata): | ||
319 | val = getVar(key, alterdata) | ||
320 | if type(val) is not types.StringType: | ||
321 | continue | ||
322 | expanded = expand(val, readdata) | ||
323 | # print "key is %s, val is %s, expanded is %s" % (key, val, expanded) | ||
324 | if val != expanded: | ||
325 | setVar(key, expanded, alterdata) | ||
326 | |||
327 | def inheritFromOS(d): | ||
328 | """Inherit variables from the environment.""" | ||
329 | for s in os.environ.keys(): | ||
330 | try: | ||
331 | setVar(s, os.environ[s], d) | ||
332 | setVarFlag(s, "export", True, d) | ||
333 | except TypeError: | ||
334 | pass | ||
335 | |||
336 | def emit_var(var, o=sys.__stdout__, d = init(), all=False): | ||
337 | """Emit a variable to be sourced by a shell.""" | ||
338 | if getVarFlag(var, "python", d): | ||
339 | return 0 | ||
340 | |||
341 | export = getVarFlag(var, "export", d) | ||
342 | unexport = getVarFlag(var, "unexport", d) | ||
343 | func = getVarFlag(var, "func", d) | ||
344 | if not all and not export and not unexport and not func: | ||
345 | return 0 | ||
346 | |||
347 | try: | ||
348 | if all: | ||
349 | oval = getVar(var, d, 0) | ||
350 | val = getVar(var, d, 1) | ||
351 | except KeyboardInterrupt: | ||
352 | raise | ||
353 | except: | ||
354 | excname = str(sys.exc_info()[0]) | ||
355 | if excname == "bb.build.FuncFailed": | ||
356 | raise | ||
357 | o.write('# expansion of %s threw %s\n' % (var, excname)) | ||
358 | return 0 | ||
359 | |||
360 | if all: | ||
361 | o.write('# %s=%s\n' % (var, oval)) | ||
362 | |||
363 | if type(val) is not types.StringType: | ||
364 | return 0 | ||
365 | |||
366 | if (var.find("-") != -1 or var.find(".") != -1 or var.find('{') != -1 or var.find('}') != -1 or var.find('+') != -1) and not all: | ||
367 | return 0 | ||
368 | |||
369 | varExpanded = expand(var, d) | ||
370 | |||
371 | if unexport: | ||
372 | o.write('unset %s\n' % varExpanded) | ||
373 | return 1 | ||
374 | |||
375 | val.rstrip() | ||
376 | if not val: | ||
377 | return 0 | ||
378 | |||
379 | if func: | ||
380 | # NOTE: should probably check for unbalanced {} within the var | ||
381 | o.write("%s() {\n%s\n}\n" % (varExpanded, val)) | ||
382 | return 1 | ||
383 | |||
384 | if export: | ||
385 | o.write('export ') | ||
386 | |||
387 | # if we're going to output this within doublequotes, | ||
388 | # to a shell, we need to escape the quotes in the var | ||
389 | alter = re.sub('"', '\\"', val.strip()) | ||
390 | o.write('%s="%s"\n' % (varExpanded, alter)) | ||
391 | return 1 | ||
392 | |||
393 | |||
394 | def emit_env(o=sys.__stdout__, d = init(), all=False): | ||
395 | """Emits all items in the data store in a format such that it can be sourced by a shell.""" | ||
396 | |||
397 | env = keys(d) | ||
398 | |||
399 | for e in env: | ||
400 | if getVarFlag(e, "func", d): | ||
401 | continue | ||
402 | emit_var(e, o, d, all) and o.write('\n') | ||
403 | |||
404 | for e in env: | ||
405 | if not getVarFlag(e, "func", d): | ||
406 | continue | ||
407 | emit_var(e, o, d) and o.write('\n') | ||
408 | |||
409 | def update_data(d): | ||
410 | """Modifies the environment vars according to local overrides and commands. | ||
411 | Examples: | ||
412 | Appending to a variable: | ||
413 | >>> d = init() | ||
414 | >>> setVar('TEST', 'this is a', d) | ||
415 | >>> setVar('TEST_append', ' test', d) | ||
416 | >>> setVar('TEST_append', ' of the emergency broadcast system.', d) | ||
417 | >>> update_data(d) | ||
418 | >>> print getVar('TEST', d) | ||
419 | this is a test of the emergency broadcast system. | ||
420 | |||
421 | Prepending to a variable: | ||
422 | >>> setVar('TEST', 'virtual/libc', d) | ||
423 | >>> setVar('TEST_prepend', 'virtual/tmake ', d) | ||
424 | >>> setVar('TEST_prepend', 'virtual/patcher ', d) | ||
425 | >>> update_data(d) | ||
426 | >>> print getVar('TEST', d) | ||
427 | virtual/patcher virtual/tmake virtual/libc | ||
428 | |||
429 | Overrides: | ||
430 | >>> setVar('TEST_arm', 'target', d) | ||
431 | >>> setVar('TEST_ramses', 'machine', d) | ||
432 | >>> setVar('TEST_local', 'local', d) | ||
433 | >>> setVar('OVERRIDES', 'arm', d) | ||
434 | |||
435 | >>> setVar('TEST', 'original', d) | ||
436 | >>> update_data(d) | ||
437 | >>> print getVar('TEST', d) | ||
438 | target | ||
439 | |||
440 | >>> setVar('OVERRIDES', 'arm:ramses:local', d) | ||
441 | >>> setVar('TEST', 'original', d) | ||
442 | >>> update_data(d) | ||
443 | >>> print getVar('TEST', d) | ||
444 | local | ||
445 | |||
446 | CopyMonster: | ||
447 | >>> e = d.createCopy() | ||
448 | >>> setVar('TEST_foo', 'foo', e) | ||
449 | >>> update_data(e) | ||
450 | >>> print getVar('TEST', e) | ||
451 | local | ||
452 | |||
453 | >>> setVar('OVERRIDES', 'arm:ramses:local:foo', e) | ||
454 | >>> update_data(e) | ||
455 | >>> print getVar('TEST', e) | ||
456 | foo | ||
457 | |||
458 | >>> f = d.createCopy() | ||
459 | >>> setVar('TEST_moo', 'something', f) | ||
460 | >>> setVar('OVERRIDES', 'moo:arm:ramses:local:foo', e) | ||
461 | >>> update_data(e) | ||
462 | >>> print getVar('TEST', e) | ||
463 | foo | ||
464 | |||
465 | |||
466 | >>> h = init() | ||
467 | >>> setVar('SRC_URI', 'file://append.foo;patch=1 ', h) | ||
468 | >>> g = h.createCopy() | ||
469 | >>> setVar('SRC_URI_append_arm', 'file://other.foo;patch=1', g) | ||
470 | >>> setVar('OVERRIDES', 'arm:moo', g) | ||
471 | >>> update_data(g) | ||
472 | >>> print getVar('SRC_URI', g) | ||
473 | file://append.foo;patch=1 file://other.foo;patch=1 | ||
474 | |||
475 | """ | ||
476 | bb.msg.debug(2, bb.msg.domain.Data, "update_data()") | ||
477 | |||
478 | # now ask the cookie monster for help | ||
479 | #print "Cookie Monster" | ||
480 | #print "Append/Prepend %s" % d._special_values | ||
481 | #print "Overrides %s" % d._seen_overrides | ||
482 | |||
483 | overrides = (getVar('OVERRIDES', d, 1) or "").split(':') or [] | ||
484 | |||
485 | # | ||
486 | # Well let us see what breaks here. We used to iterate | ||
487 | # over each variable and apply the override and then | ||
488 | # do the line expanding. | ||
489 | # If we have bad luck - which we will have - the keys | ||
490 | # where in some order that is so important for this | ||
491 | # method which we don't have anymore. | ||
492 | # Anyway we will fix that and write test cases this | ||
493 | # time. | ||
494 | |||
495 | # | ||
496 | # First we apply all overrides | ||
497 | # Then we will handle _append and _prepend | ||
498 | # | ||
499 | |||
500 | for o in overrides: | ||
501 | # calculate '_'+override | ||
502 | l = len(o)+1 | ||
503 | |||
504 | # see if one should even try | ||
505 | if not d._seen_overrides.has_key(o): | ||
506 | continue | ||
507 | |||
508 | vars = d._seen_overrides[o] | ||
509 | for var in vars: | ||
510 | name = var[:-l] | ||
511 | try: | ||
512 | d[name] = d[var] | ||
513 | except: | ||
514 | bb.msg.note(1, bb.msg.domain.Data, "Untracked delVar") | ||
515 | |||
516 | # now on to the appends and prepends | ||
517 | if d._special_values.has_key('_append'): | ||
518 | appends = d._special_values['_append'] or [] | ||
519 | for append in appends: | ||
520 | for (a, o) in getVarFlag(append, '_append', d) or []: | ||
521 | # maybe the OVERRIDE was not yet added so keep the append | ||
522 | if (o and o in overrides) or not o: | ||
523 | delVarFlag(append, '_append', d) | ||
524 | if o and not o in overrides: | ||
525 | continue | ||
526 | |||
527 | sval = getVar(append,d) or "" | ||
528 | sval+=a | ||
529 | setVar(append, sval, d) | ||
530 | |||
531 | |||
532 | if d._special_values.has_key('_prepend'): | ||
533 | prepends = d._special_values['_prepend'] or [] | ||
534 | |||
535 | for prepend in prepends: | ||
536 | for (a, o) in getVarFlag(prepend, '_prepend', d) or []: | ||
537 | # maybe the OVERRIDE was not yet added so keep the prepend | ||
538 | if (o and o in overrides) or not o: | ||
539 | delVarFlag(prepend, '_prepend', d) | ||
540 | if o and not o in overrides: | ||
541 | continue | ||
542 | |||
543 | sval = a + (getVar(prepend,d) or "") | ||
544 | setVar(prepend, sval, d) | ||
545 | |||
546 | |||
547 | def inherits_class(klass, d): | ||
548 | val = getVar('__inherit_cache', d) or [] | ||
549 | if os.path.join('classes', '%s.bbclass' % klass) in val: | ||
550 | return True | ||
551 | return False | ||
552 | |||
553 | def _test(): | ||
554 | """Start a doctest run on this module""" | ||
555 | import doctest | ||
556 | import bb | ||
557 | from bb import data | ||
558 | bb.msg.set_debug_level(0) | ||
559 | doctest.testmod(data) | ||
560 | |||
561 | if __name__ == "__main__": | ||
562 | _test() | ||
diff --git a/bitbake-dev/lib/bb/data_smart.py b/bitbake-dev/lib/bb/data_smart.py deleted file mode 100644 index 988d5c3578..0000000000 --- a/bitbake-dev/lib/bb/data_smart.py +++ /dev/null | |||
@@ -1,289 +0,0 @@ | |||
1 | # ex:ts=4:sw=4:sts=4:et | ||
2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
3 | """ | ||
4 | BitBake Smart Dictionary Implementation | ||
5 | |||
6 | Functions for interacting with the data structure used by the | ||
7 | BitBake build tools. | ||
8 | |||
9 | """ | ||
10 | |||
11 | # Copyright (C) 2003, 2004 Chris Larson | ||
12 | # Copyright (C) 2004, 2005 Seb Frankengul | ||
13 | # Copyright (C) 2005, 2006 Holger Hans Peter Freyther | ||
14 | # Copyright (C) 2005 Uli Luckas | ||
15 | # Copyright (C) 2005 ROAD GmbH | ||
16 | # | ||
17 | # This program is free software; you can redistribute it and/or modify | ||
18 | # it under the terms of the GNU General Public License version 2 as | ||
19 | # published by the Free Software Foundation. | ||
20 | # | ||
21 | # This program is distributed in the hope that it will be useful, | ||
22 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
23 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
24 | # GNU General Public License for more details. | ||
25 | # | ||
26 | # You should have received a copy of the GNU General Public License along | ||
27 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
28 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
29 | # Based on functions from the base bb module, Copyright 2003 Holger Schurig | ||
30 | |||
31 | import copy, os, re, sys, time, types | ||
32 | import bb | ||
33 | from bb import utils, methodpool | ||
34 | from COW import COWDictBase | ||
35 | from new import classobj | ||
36 | |||
37 | |||
# Variable-name suffixes that trigger deferred append/prepend handling in setVar().
__setvar_keyword__ = ["_append","_prepend"]
# Splits names like FOO_append_arm into base / keyword / optional override parts.
__setvar_regexp__ = re.compile('(?P<base>.*?)(?P<keyword>_append|_prepend)(_(?P<add>.*))?')
# Matches a simple ${VAR} reference (no nested braces inside).
__expand_var_regexp__ = re.compile(r"\${[^{}]+}")
# Matches a ${@python-expression} reference (non-greedy to the first '}').
__expand_python_regexp__ = re.compile(r"\${@.+?}")
42 | |||
43 | |||
class DataSmart:
    """Copy-on-write variable store used by BitBake.

    Variables live in self.dict as {flagname: value} dictionaries with the
    value itself stored under the "content" flag.  createCopy() does not copy
    data; it chains dictionaries via the "_data" key, and lookups walk that
    chain until a write makes a local (shadow) copy.
    """

    # NOTE(review): the mutable COWDictBase.copy() defaults are evaluated once
    # at definition time, so every DataSmart() created without arguments shares
    # the same COW objects — the copy-on-write semantics make this workable,
    # but it is a deliberate and surprising design point.
    def __init__(self, special = COWDictBase.copy(), seen = COWDictBase.copy() ):
        self.dict = {}

        # cookie monster tribute
        # _special_values: keyword ("_append"/"_prepend") -> set of base
        # variable names with pending operations; _seen_overrides: override
        # suffix -> set of variable names carrying it.  Both are consumed by
        # bb.data's update_data().
        self._special_values = special
        self._seen_overrides = seen

        # varname -> fully expanded value; invalidated on every mutation.
        self.expand_cache = {}

    def expand(self,s, varname):
        """Repeatedly expand ${VAR} and ${@python} references in s.

        varname is the name being expanded; it keys the expand cache and is
        used for self-reference detection.  Non-string s is returned as-is.
        """
        def var_sub(match):
            # Strip the "${" and "}" around the variable name.
            key = match.group()[2:-1]
            if varname and key:
                if varname == key:
                    raise Exception("variable %s references itself!" % varname)
            var = self.getVar(key, 1)
            if var is not None:
                return var
            else:
                # Unknown reference: leave it in place for a later pass.
                return match.group()

        def python_sub(match):
            import bb
            # Strip the "${@" and "}" around the expression.
            code = match.group()[3:-1]
            # Expose the datastore to the evaluated fragment as 'd'.
            # NOTE(review): relies on CPython-2 behaviour where eval() below
            # sees names injected via locals(); not portable.
            locals()['d'] = self
            s = eval(code)
            if type(s) == types.IntType: s = str(s)
            return s

        if type(s) is not types.StringType: # sanity check
            return s

        if varname and varname in self.expand_cache:
            return self.expand_cache[varname]

        while s.find('${') != -1:
            olds = s
            try:
                s = __expand_var_regexp__.sub(var_sub, s)
                s = __expand_python_regexp__.sub(python_sub, s)
                # No progress means only unresolvable references remain: stop.
                if s == olds: break
                if type(s) is not types.StringType: # sanity check
                    bb.msg.error(bb.msg.domain.Data, 'expansion of %s returned non-string %s' % (olds, s))
            except KeyboardInterrupt:
                raise
            except:
                bb.msg.note(1, bb.msg.domain.Data, "%s:%s while evaluating:\n%s" % (sys.exc_info()[0], sys.exc_info()[1], s))
                raise

        if varname:
            self.expand_cache[varname] = s

        return s

    def initVar(self, var):
        """Ensure var exists in this layer with an empty flag dictionary."""
        self.expand_cache = {}
        if not var in self.dict:
            self.dict[var] = {}

    def _findVar(self,var):
        """Walk the _data chain and return var's flag dict, or None."""
        _dest = self.dict

        while (_dest and var not in _dest):
            if not "_data" in _dest:
                _dest = None
                break
            _dest = _dest["_data"]

        if _dest and var in _dest:
            return _dest[var]
        return None

    def _makeShadowCopy(self, var):
        """Copy var from an ancestor layer so writes stay local to this one."""
        if var in self.dict:
            return

        local_var = self._findVar(var)

        if local_var:
            # Shallow copy: individual flag values are shared until rewritten.
            self.dict[var] = copy.copy(local_var)
        else:
            self.initVar(var)

    def setVar(self,var,value):
        """Set var to value; _append/_prepend suffixed names are deferred."""
        self.expand_cache = {}
        match = __setvar_regexp__.match(var)
        if match and match.group("keyword") in __setvar_keyword__:
            # FOO_append[_override]: record the operation on the base
            # variable's flag instead of setting anything now.
            base = match.group('base')
            keyword = match.group("keyword")
            override = match.group('add')
            l = self.getVarFlag(base, keyword) or []
            l.append([value, override])
            self.setVarFlag(base, keyword, l)

            # todo make sure keyword is not __doc__ or __module__
            # pay the cookie monster
            try:
                self._special_values[keyword].add( base )
            except:
                # First operation of this kind: create the tracking set.
                self._special_values[keyword] = set()
                self._special_values[keyword].add( base )

            return

        if not var in self.dict:
            self._makeShadowCopy(var)

        # more cookies for the cookie monster
        # Any trailing _suffix is recorded as a potential override.
        if '_' in var:
            override = var[var.rfind('_')+1:]
            if not self._seen_overrides.has_key(override):
                self._seen_overrides[override] = set()
            self._seen_overrides[override].add( var )

        # setting var
        self.dict[var]["content"] = value

    def getVar(self,var,exp):
        """Return var's value; expand it first when exp is true."""
        value = self.getVarFlag(var,"content")

        if exp and value:
            return self.expand(value,var)
        return value

    def renameVar(self, key, newkey):
        """
        Rename the variable key to newkey
        """
        val = self.getVar(key, 0)
        if val is not None:
            self.setVar(newkey, val)

        # Carry pending append/prepend operations across the rename.
        for i in ('_append', '_prepend'):
            src = self.getVarFlag(key, i)
            if src is None:
                continue

            dest = self.getVarFlag(newkey, i) or []
            dest.extend(src)
            self.setVarFlag(newkey, i, dest)

            if self._special_values.has_key(i) and key in self._special_values[i]:
                self._special_values[i].remove(key)
                self._special_values[i].add(newkey)

        self.delVar(key)

    def delVar(self,var):
        """Delete var by masking it with an empty flag dict in this layer."""
        self.expand_cache = {}
        self.dict[var] = {}

    def setVarFlag(self,var,flag,flagvalue):
        """Set a single flag on var (shadow-copying it into this layer)."""
        if not var in self.dict:
            self._makeShadowCopy(var)
        self.dict[var][flag] = flagvalue

    def getVarFlag(self,var,flag):
        """Return a shallow copy of var's flag value, or None."""
        local_var = self._findVar(var)
        if local_var:
            if flag in local_var:
                # Copy so callers cannot mutate a shared (COW) value in place.
                return copy.copy(local_var[flag])
        return None

    def delVarFlag(self,var,flag):
        """Remove a single flag from var, if present anywhere in the chain."""
        local_var = self._findVar(var)
        if not local_var:
            return
        if not var in self.dict:
            self._makeShadowCopy(var)

        if var in self.dict and flag in self.dict[var]:
            del self.dict[var][flag]

    def setVarFlags(self,var,flags):
        """Set several flags at once; the "content" key is never overwritten."""
        if not var in self.dict:
            self._makeShadowCopy(var)

        for i in flags.keys():
            if i == "content":
                continue
            self.dict[var][i] = flags[i]

    def getVarFlags(self,var):
        """Return a dict of var's flags (excluding "content"), or None."""
        local_var = self._findVar(var)
        flags = {}

        if local_var:
            for i in local_var.keys():
                if i == "content":
                    continue
                flags[i] = local_var[i]

        if len(flags) == 0:
            return None
        return flags


    def delVarFlags(self,var):
        """Drop all of var's flags while preserving its value, if any."""
        if not var in self.dict:
            self._makeShadowCopy(var)

        if var in self.dict:
            content = None

            # try to save the content
            if "content" in self.dict[var]:
                content = self.dict[var]["content"]
                self.dict[var] = {}
                self.dict[var]["content"] = content
            else:
                del self.dict[var]


    def createCopy(self):
        """
        Create a copy of self by setting _data to self
        """
        # we really want this to be a DataSmart...
        data = DataSmart(seen=self._seen_overrides.copy(), special=self._special_values.copy())
        data.dict["_data"] = self.dict

        return data

    # Dictionary Methods
    def keys(self):
        """Return all variable names visible through the _data chain."""
        def _keys(d, mykey):
            # Recurse to the oldest layer first so every ancestor key appears.
            if "_data" in d:
                _keys(d["_data"],mykey)

            for key in d.keys():
                if key != "_data":
                    mykey[key] = None
        keytab = {}
        _keys(self.dict,keytab)
        return keytab.keys()

    def __getitem__(self,item):
        #print "Warning deprecated"
        return self.getVar(item, False)

    def __setitem__(self,var,data):
        #print "Warning deprecated"
        self.setVar(var,data)
288 | |||
289 | |||
diff --git a/bitbake-dev/lib/bb/event.py b/bitbake-dev/lib/bb/event.py deleted file mode 100644 index 7251d78715..0000000000 --- a/bitbake-dev/lib/bb/event.py +++ /dev/null | |||
@@ -1,275 +0,0 @@ | |||
1 | # ex:ts=4:sw=4:sts=4:et | ||
2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
3 | """ | ||
4 | BitBake 'Event' implementation | ||
5 | |||
6 | Classes and functions for manipulating 'events' in the | ||
7 | BitBake build tools. | ||
8 | """ | ||
9 | |||
10 | # Copyright (C) 2003, 2004 Chris Larson | ||
11 | # | ||
12 | # This program is free software; you can redistribute it and/or modify | ||
13 | # it under the terms of the GNU General Public License version 2 as | ||
14 | # published by the Free Software Foundation. | ||
15 | # | ||
16 | # This program is distributed in the hope that it will be useful, | ||
17 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
19 | # GNU General Public License for more details. | ||
20 | # | ||
21 | # You should have received a copy of the GNU General Public License along | ||
22 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
23 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
24 | |||
25 | import os, re | ||
26 | import bb.utils | ||
27 | import pickle | ||
28 | |||
29 | # This is the pid for which we should generate the event. This is set when | ||
30 | # the runqueue forks off. | ||
31 | worker_pid = 0 | ||
32 | worker_pipe = None | ||
33 | |||
class Event:
    """Base class for events.

    Records the pid of the originating worker (module global worker_pid,
    0 in the main process) so the server side can attribute the event.
    """

    def __init__(self):
        self.pid = worker_pid
39 | |||
# Handler result codes.
NotHandled = 0
Handled = 1

# register() result codes.
Registered = 10
AlreadyRegistered = 14

# Internal
# name -> handler; either a callable or a compiled code object (see register()).
_handlers = {}
# sequence id -> UI handler proxy (see register_UIHhandler()).
_ui_handlers = {}
# Monotonically increasing id used to key _ui_handlers.
_ui_handler_seq = 0
50 | |||
def fire(event, d):
    """Fire off an Event"""

    # In a forked worker, forward the event to the server over the pipe
    # instead of dispatching it locally.
    if worker_pid != 0:
        worker_fire(event, d)
        return

    for handler in _handlers:
        h = _handlers[handler]
        # The datastore is attached to the event only for the duration of
        # each handler call.
        event.data = d
        if type(h).__name__ == "code":
            # Handler registered as source text: executing the compiled code
            # defines tmpHandler() here.  NOTE(review): this relies on
            # Python-2 exec-in-function-locals semantics.
            exec(h)
            tmpHandler(event)
        else:
            h(event)
        del event.data

    errors = []
    for h in _ui_handlers:
        #print "Sending event %s" % event
        try:
            # We use pickle here since it better handles object instances
            # which xmlrpc's marshaller does not. Events *must* be serializable
            # by pickle.
            _ui_handlers[h].event.send((pickle.dumps(event)))
        except:
            # A failing UI handler (e.g. the UI went away) is dropped.
            errors.append(h)
    for h in errors:
        del _ui_handlers[h]
80 | |||
def worker_fire(event, d):
    """Send a pickled event from a worker to the server over worker_pipe."""
    data = "<event>" + pickle.dumps(event) + "</event>"
    # NOTE(review): a short write is only reported, not retried; the event is
    # lost and the stream framing is left corrupt in that case.
    if os.write(worker_pipe, data) != len (data):
        print "Error sending event to server (short write)"
85 | |||
def fire_from_worker(event, d):
    """Unpack an event string received from a worker and re-fire it locally."""
    if not event.startswith("<event>") or not event.endswith("</event>"):
        print "Error, not an event"
        return
    # Strip the <event>...</event> framing (7 and 8 characters respectively).
    # NOTE(review): pickle.loads on pipe data — safe only because the pipe
    # comes from our own forked worker, not an external source.
    event = pickle.loads(event[7:-8])
    bb.event.fire(event, d)
92 | |||
def register(name, handler):
    """Register an Event handler"""

    # already registered
    if name in _handlers:
        return AlreadyRegistered

    if handler is None:
        return Registered

    if type(handler).__name__ == "str":
        # handle string containing python code: compile it into a code
        # object that fire() will exec to obtain tmpHandler().
        source = "def tmpHandler(e):\n%s" % handler
        _handlers[name] = bb.utils.better_compile(source, "tmpHandler(e)", "bb.event._registerCode")
    else:
        _handlers[name] = handler

    return Registered
110 | |||
def remove(name, handler):
    """Remove an Event handler"""
    # handler is accepted for symmetry with register() but is not consulted.
    del _handlers[name]
114 | |||
def register_UIHhandler(handler):
    """Register a UI handler proxy and return its id for unregistration."""
    # Incremented through the module attribute so the new value is visible
    # to all users of bb.event.
    bb.event._ui_handler_seq = bb.event._ui_handler_seq + 1
    _ui_handlers[_ui_handler_seq] = handler
    return _ui_handler_seq
119 | |||
def unregister_UIHhandler(handlerNum):
    """Drop the UI handler registered under handlerNum, if any."""
    # pop with a default keeps the original "silently ignore unknown ids"
    # behaviour.
    _ui_handlers.pop(handlerNum, None)
    return
124 | |||
def getName(e):
    """Returns the name of a class or class instance"""
    name = getattr(e, "__name__", None)
    if name is None:
        # Instances have no __name__; fall back to their class's name.
        return e.__class__.__name__
    return name
131 | |||
# Fired once the base configuration has been parsed.
class ConfigParsed(Event):
    """Configuration Parsing Complete"""
134 | |||
class RecipeParsed(Event):
    """ Recipe Parsing Complete """

    def __init__(self, fn):
        # fn: filename of the recipe that finished parsing.
        self.fn = fn
        Event.__init__(self)
141 | |||
class StampUpdate(Event):
    """Trigger for any adjustment of the stamp files to happen"""

    def __init__(self, targets, stampfns):
        # targets: build targets affected; stampfns: matching stamp prefixes.
        self._targets = targets
        self._stampfns = stampfns
        Event.__init__(self)

    def getStampPrefix(self):
        return self._stampfns

    def getTargets(self):
        return self._targets

    # Read-only accessors exposed as properties.
    stampPrefix = property(getStampPrefix)
    targets = property(getTargets)
158 | |||
class BuildBase(Event):
    """Base class for bbmake run events"""

    def __init__(self, n, p, failures = 0):
        # n: build name; p: list of packages in this run.
        self._name = n
        self._pkgs = p
        Event.__init__(self)
        # Number of packages that failed in this run.
        self._failures = failures

    def getPkgs(self):
        return self._pkgs

    def setPkgs(self, pkgs):
        self._pkgs = pkgs

    def getName(self):
        return self._name

    def setName(self, name):
        self._name = name

    def getCfg(self):
        # self.data is the datastore attached by fire() during dispatch.
        return self.data

    def setCfg(self, cfg):
        self.data = cfg

    def getFailures(self):
        """
        Return the number of failed packages
        """
        return self._failures

    pkgs = property(getPkgs, setPkgs, None, "pkgs property")
    name = property(getName, setName, None, "name property")
    cfg = property(getCfg, setCfg, None, "cfg property")
195 | |||
196 | |||
197 | |||
198 | |||
199 | |||
# Fired when a bbmake build run begins.
class BuildStarted(BuildBase):
    """bbmake build run started"""
202 | |||
203 | |||
# Fired when a bbmake build run finishes (see _failures for the outcome).
class BuildCompleted(BuildBase):
    """bbmake build run completed"""
206 | |||
207 | |||
208 | |||
209 | |||
class NoProvider(Event):
    """No Provider for an Event"""

    def __init__(self, item, runtime=False):
        Event.__init__(self)
        # item: the target nothing provides; runtime: True for runtime
        # (RDEPENDS-style) targets.
        self._item = item
        self._runtime = runtime

    def getItem(self):
        return self._item

    def isRuntime(self):
        return self._runtime
223 | |||
class MultipleProviders(Event):
    """Multiple Providers"""

    def __init__(self, item, candidates, runtime = False):
        Event.__init__(self)
        # item: the requested target; candidates: the providers found for it.
        self._item = item
        self._candidates = candidates
        self._is_runtime = runtime

    def isRuntime(self):
        """
        Is this a runtime issue?
        """
        return self._is_runtime

    def getItem(self):
        """
        The name of the item to be built
        """
        return self._item

    def getCandidates(self):
        """
        Get the possible Candidates for a PROVIDER.
        """
        return self._candidates
250 | |||
class ParseProgress(Event):
    """
    Parsing Progress Event
    """

    def __init__(self, cached, parsed, skipped, masked, virtuals, errors, total):
        Event.__init__(self)
        self.cached = cached
        self.parsed = parsed
        self.skipped = skipped
        self.virtuals = virtuals
        self.masked = masked
        self.errors = errors
        # Recipes dealt with so far, whether loaded from cache or parsed.
        self.sofar = cached + parsed
        self.total = total
266 | |||
class DepTreeGenerated(Event):
    """
    Event when a dependency tree has been generated
    """

    def __init__(self, depgraph):
        Event.__init__(self)
        # depgraph: the generated dependency-tree structure.
        self._depgraph = depgraph
275 | |||
diff --git a/bitbake-dev/lib/bb/fetch/__init__.py b/bitbake-dev/lib/bb/fetch/__init__.py deleted file mode 100644 index ab4658bc3b..0000000000 --- a/bitbake-dev/lib/bb/fetch/__init__.py +++ /dev/null | |||
@@ -1,640 +0,0 @@ | |||
1 | # ex:ts=4:sw=4:sts=4:et | ||
2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
3 | """ | ||
4 | BitBake 'Fetch' implementations | ||
5 | |||
6 | Classes for obtaining upstream sources for the | ||
7 | BitBake build tools. | ||
8 | """ | ||
9 | |||
10 | # Copyright (C) 2003, 2004 Chris Larson | ||
11 | # | ||
12 | # This program is free software; you can redistribute it and/or modify | ||
13 | # it under the terms of the GNU General Public License version 2 as | ||
14 | # published by the Free Software Foundation. | ||
15 | # | ||
16 | # This program is distributed in the hope that it will be useful, | ||
17 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
19 | # GNU General Public License for more details. | ||
20 | # | ||
21 | # You should have received a copy of the GNU General Public License along | ||
22 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
23 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
24 | # | ||
25 | # Based on functions from the base bb module, Copyright 2003 Holger Schurig | ||
26 | |||
27 | import os, re | ||
28 | import bb | ||
29 | from bb import data | ||
30 | from bb import persist_data | ||
31 | |||
class FetchError(Exception):
    """Exception raised when a download fails"""

class NoMethodError(Exception):
    """Exception raised when there is no method to obtain a supplied url or set of urls"""

class MissingParameterError(Exception):
    """Exception raised when a fetch method is missing a critical parameter in the url"""

class ParameterError(Exception):
    """Exception raised when a url cannot be processed due to invalid parameters."""

class MD5SumError(Exception):
    """Exception raised when the MD5SUM of a file does not match the expected one"""

class InvalidSRCREV(Exception):
    """Exception raised when an invalid SRCREV is encountered"""
49 | |||
def uri_replace(uri, uri_find, uri_replace, d):
    """Rewrite uri according to a (find, replace) mirror pair.

    Each decoded component (type, host, path, user, password, params) of
    uri_find is regex-matched against the corresponding component of uri
    and, on a match, substituted with the component from uri_replace.
    Returns the re-encoded uri, or the original uri unchanged when the
    find pattern does not match (or any argument is undefined).
    """
    import types
    # bb.msg.note(1, bb.msg.domain.Fetcher, "uri_replace: operating on %s" % uri)
    if not uri or not uri_find or not uri_replace:
        bb.msg.debug(1, bb.msg.domain.Fetcher, "uri_replace: passed an undefined value, not replacing")
        # Bug fix: previously fell through and crashed in bb.decodeurl(None).
        return uri
    uri_decoded = list(bb.decodeurl(uri))
    uri_find_decoded = list(bb.decodeurl(uri_find))
    uri_replace_decoded = list(bb.decodeurl(uri_replace))
    result_decoded = ['','','','','',{}]
    # Bug fix: the original used uri_find_decoded.index(i), which returns the
    # FIRST position of a value and picks the wrong slot when components
    # repeat (e.g. empty user and password).  enumerate() gives the real one.
    for loc, i in enumerate(uri_find_decoded):
        result_decoded[loc] = uri_decoded[loc]
        if type(i) == types.StringType:
            if (re.match(i, uri_decoded[loc])):
                result_decoded[loc] = re.sub(i, uri_replace_decoded[loc], uri_decoded[loc])
                if loc == 2:
                    # Path component: keep the fetched file's local basename
                    # so the mirror path still names the right file.
                    if d:
                        localfn = bb.fetch.localpath(uri, d)
                        if localfn:
                            result_decoded[loc] = os.path.dirname(result_decoded[loc]) + "/" + os.path.basename(bb.fetch.localpath(uri, d))
            else:
                # This component did not match: the mirror entry does not apply.
                return uri
        # else: dict of url parameters.
        # FIXME: apply replacements against options
    return bb.encodeurl(result_decoded)
78 | |||
# Registered fetcher implementations (appended to by each fetcher module).
methods = []
# Recipe file (FILE) -> {url: FetchData} cache populated by init().
urldata_cache = {}
# Snapshot of persisted head revisions taken when the cache is cleared.
saved_headrevs = {}
82 | |||
def fetcher_init(d):
    """
    Called to initialize the fetchers once the configuration data is known
    Calls before this must not hit the cache.
    """
    pd = persist_data.PersistData(d)
    # When to drop SCM head revisions is controlled by user policy
    srcrev_policy = bb.data.getVar('BB_SRCREV_POLICY', d, 1) or "clear"
    if srcrev_policy == "cache":
        bb.msg.debug(1, bb.msg.domain.Fetcher, "Keeping SRCREV cache due to cache policy of: %s" % srcrev_policy)
    elif srcrev_policy == "clear":
        bb.msg.debug(1, bb.msg.domain.Fetcher, "Clearing SRCREV cache due to cache policy of: %s" % srcrev_policy)
        try:
            # Keep the old revisions around so fetcher_compare_revisons()
            # can report what changed after the cache is rebuilt.
            bb.fetch.saved_headrevs = pd.getKeyValues("BB_URI_HEADREVS")
        except:
            # No previous cache to save — that's fine.
            pass
        pd.delDomain("BB_URI_HEADREVS")
    else:
        bb.msg.fatal(bb.msg.domain.Fetcher, "Invalid SRCREV cache policy of: %s" % srcrev_policy)

    # Give each fetcher implementation a chance to initialise itself.
    for m in methods:
        if hasattr(m, "init"):
            m.init(d)

    # Make sure our domains exist
    pd.addDomain("BB_URI_HEADREVS")
    pd.addDomain("BB_URI_LOCALCOUNT")
110 | |||
def fetcher_compare_revisons(d):
    """
    Compare the revisions in the persistant cache with current values and
    return true/false on whether they've changed.
    """
    # NOTE: the misspelt name ("revisons") is kept — callers use it as-is.

    pd = persist_data.PersistData(d)
    data = pd.getKeyValues("BB_URI_HEADREVS")
    data2 = bb.fetch.saved_headrevs

    changed = False
    for key in data:
        if key not in data2 or data2[key] != data[key]:
            bb.msg.debug(1, bb.msg.domain.Fetcher, "%s changed" % key)
            changed = True
        else:
            bb.msg.debug(2, bb.msg.domain.Fetcher, "%s did not change" % key)
    # Bug fix: previously this returned from inside the loop on the first
    # changed key, so later changes were never logged.  The return value
    # (True iff any key changed or is new) is unchanged.
    return changed
130 | |||
131 | # Function call order is usually: | ||
132 | # 1. init | ||
133 | # 2. go | ||
134 | # 3. localpaths | ||
135 | # localpath can be called at any time | ||
136 | |||
def init(urls, d, setup = True):
    """Build (and cache per-recipe) FetchData objects for the given urls.

    The cache is keyed on the recipe file (FILE) and accumulates across
    calls.  When setup is True, localpath resolution runs for every cached
    url, not only the ones passed in this call.
    """
    urldata = {}
    fn = bb.data.getVar('FILE', d, 1)
    if fn in urldata_cache:
        urldata = urldata_cache[fn]

    for url in urls:
        if url not in urldata:
            urldata[url] = FetchData(url, d)

    if setup:
        for url in urldata:
            if not urldata[url].setup:
                urldata[url].setup_localpath(d)

    urldata_cache[fn] = urldata
    return urldata
154 | |||
def go(d, urls = None):
    """
    Fetch all urls
    init must have previously been called
    """
    if not urls:
        urls = d.getVar("SRC_URI", 1).split()
    urldata = init(urls, d, True)

    for u in urls:
        ud = urldata[u]
        m = ud.method
        lf = None
        if ud.localfile:
            if not m.forcefetch(u, ud, d) and os.path.exists(ud.md5):
                # File already present along with md5 stamp file
                # Touch md5 file to show activity
                try:
                    os.utime(ud.md5, None)
                except:
                    # Errors aren't fatal here
                    pass
                continue
            lf = bb.utils.lockfile(ud.lockfile)
            if not m.forcefetch(u, ud, d) and os.path.exists(ud.md5):
                # If someone else fetched this before we got the lock,
                # notice and don't try again
                try:
                    os.utime(ud.md5, None)
                except:
                    # Errors aren't fatal here
                    pass
                bb.utils.unlockfile(lf)
                continue
        try:
            m.go(u, ud, d)
        except:
            # Bug fix: release the lock when the fetch fails instead of
            # leaking it (which deadlocked later fetches of the same file).
            # The md5 stamp is deliberately not written on failure.
            if lf:
                bb.utils.unlockfile(lf)
            raise
        if ud.localfile:
            if not m.forcefetch(u, ud, d):
                Fetch.write_md5sum(u, ud, d)
            bb.utils.unlockfile(lf)
193 | |||
194 | |||
def checkstatus(d):
    """
    Check all urls exist upstream
    init must have previously been called
    """
    urldata = init([], d, True)

    for u, ud in urldata.items():
        fetcher = ud.method
        bb.msg.note(1, bb.msg.domain.Fetcher, "Testing URL %s" % u)
        if not fetcher.checkstatus(u, ud, d):
            bb.msg.fatal(bb.msg.domain.Fetcher, "URL %s doesn't work" % u)
209 | |||
def localpaths(d):
    """
    Return a list of the local filenames, assuming successful fetch
    """
    urldata = init([], d, True)
    # One local path per cached url.
    return [urldata[u].localpath for u in urldata]
222 | |||
# Re-entrancy guard: set while a fetcher's localpath() is being evaluated so
# get_srcrev() can detect that it has been called recursively.
srcrev_internal_call = False
224 | |||
def get_srcrev(d):
    """
    Return the version string for the current package
    (usually to be used as PV)
    Most packages usually only have one SCM so we just pass on the call.
    In the multi SCM case, we build a value based on SRCREV_FORMAT which must
    have been set.
    """

    #
    # Ugly code alert. localpath in the fetchers will try to evaluate SRCREV which
    # could translate into a call to here. If it does, we need to catch this
    # and provide some way so it knows get_srcrev is active instead of being
    # some number etc. hence the srcrev_internal_call tracking and the magic
    # "SRCREVINACTION" return value.
    #
    # Neater solutions welcome!
    #
    if bb.fetch.srcrev_internal_call:
        return "SRCREVINACTION"

    scms = []

    # Only call setup_localpath on URIs which suppports_srcrev()
    # NOTE(review): "suppports_srcrev" is misspelt but matches the method
    # name the fetcher implementations define — do not "fix" it here alone.
    urldata = init(bb.data.getVar('SRC_URI', d, 1).split(), d, False)
    for u in urldata:
        ud = urldata[u]
        if ud.method.suppports_srcrev():
            if not ud.setup:
                ud.setup_localpath(d)
            scms.append(u)

    if len(scms) == 0:
        bb.msg.error(bb.msg.domain.Fetcher, "SRCREV was used yet no valid SCM was found in SRC_URI")
        raise ParameterError

    # SRCREV-derived values must not end up in the parse cache.
    bb.data.setVar('__BB_DONT_CACHE','1', d)

    if len(scms) == 1:
        return urldata[scms[0]].method.sortable_revision(scms[0], urldata[scms[0]], d)

    #
    # Mutiple SCMs are in SRC_URI so we resort to SRCREV_FORMAT
    #
    format = bb.data.getVar('SRCREV_FORMAT', d, 1)
    if not format:
        bb.msg.error(bb.msg.domain.Fetcher, "The SRCREV_FORMAT variable must be set when multiple SCMs are used.")
        raise ParameterError

    # Substitute each named SCM's revision into the format template.
    for scm in scms:
        if 'name' in urldata[scm].parm:
            name = urldata[scm].parm["name"]
            rev = urldata[scm].method.sortable_revision(scm, urldata[scm], d)
            format = format.replace(name, rev)

    return format
281 | |||
def localpath(url, d, cache = True):
    """
    Called from the parser with cache=False since the cache isn't ready
    at this point. Also called from classed in OE e.g. patch.bbclass
    """
    urldata = init([url], d)
    if not urldata[url].method:
        return url
    return urldata[url].localpath
291 | |||
def runfetchcmd(cmd, d, quiet = False):
    """
    Run cmd returning the command output
    Raise an error if interrupted or cmd fails
    Optionally echo command output to stdout
    """

    # Need to export PATH as binary could be in metadata paths
    # rather than host provided
    # Also include some other variables.
    # FIXME: Should really include all export varaiables?
    exportvars = ['PATH', 'GIT_PROXY_COMMAND', 'GIT_PROXY_HOST', 'GIT_PROXY_PORT', 'GIT_CONFIG', 'http_proxy', 'ftp_proxy', 'SSH_AUTH_SOCK', 'SSH_AGENT_PID', 'HOME']

    for var in exportvars:
        val = data.getVar(var, d, True)
        if val:
            # NOTE(review): val is interpolated unquoted into a shell command;
            # values containing spaces or shell metacharacters will break it.
            cmd = 'export ' + var + '=%s; %s' % (val, cmd)

    bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % cmd)

    # redirect stderr to stdout
    stdout_handle = os.popen(cmd + " 2>&1", "r")
    output = ""

    # Stream the output line by line, echoing unless quiet.
    while 1:
        line = stdout_handle.readline()
        if not line:
            break
        if not quiet:
            print line,
        output += line

    # popen().close() returns None on success, else a wait()-style status.
    status = stdout_handle.close() or 0
    # NOTE(review): wait() encoding puts the signal in the LOW byte and the
    # exit code in the HIGH byte; these two look swapped — confirm before
    # relying on the signal/exit-code distinction (any non-zero status still
    # raises via the elif below).
    signal = status >> 8
    exitstatus = status & 0xff

    if signal:
        raise FetchError("Fetch command %s failed with signal %s, output:\n%s" % (cmd, signal, output))
    elif status != 0:
        raise FetchError("Fetch command %s failed with exit code %s, output:\n%s" % (cmd, status, output))

    return output
334 | |||
class FetchData(object):
    """
    A class which represents the fetcher state for a given URI.
    """
    def __init__(self, url, d):
        self.localfile = ""
        # Decode the (variable-expanded) url into its components.
        (self.type, self.host, self.path, self.user, self.pswd, self.parm) = bb.decodeurl(data.expand(url, d))
        self.date = Fetch.getSRCDate(self, d)
        self.url = url
        # Credentials may be supplied as url parameters instead of userinfo.
        if not self.user and "user" in self.parm:
            self.user = self.parm["user"]
        if not self.pswd and "pswd" in self.parm:
            self.pswd = self.parm["pswd"]
        # Set once setup_localpath() has run.
        self.setup = False
        # The first registered fetcher that claims support for the url wins.
        for m in methods:
            if m.supports(url, self, d):
                self.method = m
                return
        raise NoMethodError("Missing implementation for url %s" % url)

    def setup_localpath(self, d):
        """Resolve and record the local download path plus md5/lock paths."""
        self.setup = True
        if "localpath" in self.parm:
            # if user sets localpath for file, use it instead.
            self.localpath = self.parm["localpath"]
        else:
            try:
                # Guard so get_srcrev() can detect re-entry from a fetcher's
                # localpath() evaluating SRCREV.
                bb.fetch.srcrev_internal_call = True
                self.localpath = self.method.localpath(self.url, self, d)
            finally:
                bb.fetch.srcrev_internal_call = False
            # We have to clear data's internal caches since the cached value of SRCREV is now wrong.
            # Horrible...
            bb.data.delVar("ISHOULDNEVEREXIST", d)
        self.md5 = self.localpath + '.md5'
        self.lockfile = self.localpath + '.lock'
372 | |||
class Fetch(object):
    """Base class for 'fetch'ing data.

    Concrete fetchers (git, svn, cvs, ...) subclass this and override
    supports()/localpath()/go(); the revision-handling helpers here are
    shared by all of them.
    """

    def __init__(self, urls=None):
        # Fixed: the old signature used a mutable default ([]) and then
        # silently discarded the argument (self.urls = []). Accept and
        # store the caller's list instead; None still yields an empty list.
        self.urls = urls or []

    def supports(self, url, urldata, d):
        """
        Check to see if this fetch class supports a given url.
        """
        return 0

    def localpath(self, url, urldata, d):
        """
        Return the local filename of a given url assuming a successful fetch.
        Can also setup variables in urldata for use in go (saving code duplication
        and duplicate code execution)
        """
        return url

    def setUrls(self, urls):
        self.__urls = urls

    def getUrls(self):
        return self.__urls

    urls = property(getUrls, setUrls, None, "Urls property")

    def forcefetch(self, url, urldata, d):
        """
        Force a fetch, even if localpath exists?
        """
        return False

    def suppports_srcrev(self):
        """
        The fetcher supports auto source revisions (SRCREV)
        (NOTE: the typo in this method name is preserved for API
        compatibility; subclasses override the same misspelled name)
        """
        return False

    def go(self, url, urldata, d):
        """
        Fetch urls
        Assumes localpath was called first
        """
        raise NoMethodError("Missing implementation for url")

    def checkstatus(self, url, urldata, d):
        """
        Check the status of a URL
        Assumes localpath was called first
        """
        bb.msg.note(1, bb.msg.domain.Fetcher, "URL %s could not be checked for status since no method exists." % url)
        return True

    def getSRCDate(urldata, d):
        """
        Return the SRC Date for the component

        d the bb.data module
        """
        if "srcdate" in urldata.parm:
            return urldata.parm['srcdate']

        pn = data.getVar("PN", d, 1)

        # Per-package overrides take priority; CVSDATE is the legacy name
        if pn:
            return data.getVar("SRCDATE_%s" % pn, d, 1) or data.getVar("CVSDATE_%s" % pn, d, 1) or data.getVar("SRCDATE", d, 1) or data.getVar("CVSDATE", d, 1) or data.getVar("DATE", d, 1)

        return data.getVar("SRCDATE", d, 1) or data.getVar("CVSDATE", d, 1) or data.getVar("DATE", d, 1)
    getSRCDate = staticmethod(getSRCDate)

    def srcrev_internal_helper(ud, d):
        """
        Return:
            a) a source revision if specified
            b) True if auto srcrev is in action
            c) False otherwise
        """

        if 'rev' in ud.parm:
            return ud.parm['rev']

        if 'tag' in ud.parm:
            return ud.parm['tag']

        rev = None
        if 'name' in ud.parm:
            pn = data.getVar("PN", d, 1)
            rev = data.getVar("SRCREV_pn-" + pn + "_" + ud.parm['name'], d, 1)
        if not rev:
            rev = data.getVar("SRCREV", d, 1)
        if rev == "INVALID":
            raise InvalidSRCREV("Please set SRCREV to a valid value")
        if not rev:
            return False
        # Fixed: compare the sentinel by value, not identity -- 'is' on a
        # string literal relies on interning and is not guaranteed to work
        if rev == "SRCREVINACTION":
            return True
        return rev

    srcrev_internal_helper = staticmethod(srcrev_internal_helper)

    def localcount_internal_helper(ud, d):
        """
        Return:
            a) a locked localcount if specified
            b) None otherwise
        """

        localcount = None
        if 'name' in ud.parm:
            localcount = data.getVar("LOCALCOUNT_" + ud.parm['name'], d, 1)
        if not localcount:
            localcount = data.getVar("LOCALCOUNT", d, 1)
        return localcount

    localcount_internal_helper = staticmethod(localcount_internal_helper)

    def try_mirror(d, tarfn):
        """
        Try to use a mirrored version of the sources. We do this
        to avoid massive loads on foreign cvs and svn servers.
        This method will be used by the different fetcher
        implementations.

        d Is a bb.data instance
        tarfn is the name of the tarball
        """
        tarpath = os.path.join(data.getVar("DL_DIR", d, 1), tarfn)
        if os.access(tarpath, os.R_OK):
            bb.msg.debug(1, bb.msg.domain.Fetcher, "%s already exists, skipping checkout." % tarfn)
            return True

        pn = data.getVar('PN', d, True)
        # Fixed: default to an empty list -- this was previously None when
        # PN was unset, making the for-loop below raise a TypeError
        src_tarball_stash = []
        if pn:
            src_tarball_stash = (data.getVar('SRC_TARBALL_STASH_%s' % pn, d, True) or data.getVar('CVS_TARBALL_STASH_%s' % pn, d, True) or data.getVar('SRC_TARBALL_STASH', d, True) or data.getVar('CVS_TARBALL_STASH', d, True) or "").split()

        ld = d.createCopy()
        for stash in src_tarball_stash:
            url = stash + tarfn
            try:
                ud = FetchData(url, ld)
            except bb.fetch.NoMethodError:
                bb.msg.debug(1, bb.msg.domain.Fetcher, "No method for %s" % url)
                continue

            ud.setup_localpath(ld)

            try:
                ud.method.go(url, ud, ld)
                return True
            except (bb.fetch.MissingParameterError,
                    bb.fetch.FetchError,
                    bb.fetch.MD5SumError):
                import sys
                # Fixed: avoid shadowing the 'type' builtin; only the
                # exception value is used for the log message
                value = sys.exc_info()[1]
                bb.msg.debug(2, bb.msg.domain.Fetcher, "Tarball stash fetch failure: %s" % value)
        return False
    try_mirror = staticmethod(try_mirror)

    def verify_md5sum(ud, got_sum):
        """
        Verify the md5sum we wanted with the one we got
        """
        wanted_sum = None
        if 'md5sum' in ud.parm:
            wanted_sum = ud.parm['md5sum']
        if not wanted_sum:
            # No checksum requested -> nothing to verify
            return True

        return wanted_sum == got_sum
    verify_md5sum = staticmethod(verify_md5sum)

    def write_md5sum(url, ud, d):
        """Verify and record the md5sum of ud.localpath in ud.md5."""
        md5data = bb.utils.md5_file(ud.localpath)
        # verify the md5sum
        if not Fetch.verify_md5sum(ud, md5data):
            raise MD5SumError(url)

        # Fixed: use open() instead of the deprecated file() builtin
        md5out = open(ud.md5, 'w')
        md5out.write(md5data)
        md5out.close()
    write_md5sum = staticmethod(write_md5sum)

    def latest_revision(self, url, ud, d):
        """
        Look in the cache for the latest revision, if not present ask the SCM.
        """
        if not hasattr(self, "_latest_revision"):
            raise ParameterError

        pd = persist_data.PersistData(d)
        key = self.generate_revision_key(url, ud, d)
        rev = pd.getValue("BB_URI_HEADREVS", key)
        if rev != None:
            return str(rev)

        rev = self._latest_revision(url, ud, d)
        pd.setValue("BB_URI_HEADREVS", key, rev)
        return rev

    def sortable_revision(self, url, ud, d):
        """
        Return a sortable revision string of the form "<count>+<rev>",
        caching the per-URI count in persistent data so that it increases
        monotonically as the upstream revision changes.
        """
        if hasattr(self, "_sortable_revision"):
            return self._sortable_revision(url, ud, d)

        pd = persist_data.PersistData(d)
        key = self.generate_revision_key(url, ud, d)

        latest_rev = self._build_revision(url, ud, d)
        last_rev = pd.getValue("BB_URI_LOCALCOUNT", key + "_rev")
        uselocalcount = bb.data.getVar("BB_LOCALCOUNT_OVERRIDE", d, True) or False
        count = None
        if uselocalcount:
            count = Fetch.localcount_internal_helper(ud, d)
        if count is None:
            count = pd.getValue("BB_URI_LOCALCOUNT", key + "_count")

        # Fixed: the fast path previously evaluated count + "+" + latest_rev
        # even when no count was cached, raising TypeError (None + str);
        # fall through and compute a fresh count in that case
        if last_rev == latest_rev and count is not None:
            return str(count) + "+" + latest_rev

        buildindex_provided = hasattr(self, "_sortable_buildindex")
        if buildindex_provided:
            count = self._sortable_buildindex(url, ud, d, latest_rev)

        if count is None:
            count = "0"
        elif uselocalcount or buildindex_provided:
            count = str(count)
        else:
            count = str(int(count) + 1)

        pd.setValue("BB_URI_LOCALCOUNT", key + "_rev", latest_rev)
        pd.setValue("BB_URI_LOCALCOUNT", key + "_count", count)

        return str(count) + "+" + latest_rev

    def generate_revision_key(self, url, ud, d):
        """Combine the fetcher-specific revision key with PN for uniqueness."""
        key = self._revision_key(url, ud, d)
        return "%s-%s" % (key, bb.data.getVar("PN", d, True) or "")
617 | |||
# Import the concrete fetcher implementations (Python 2 implicit-relative
# imports of the sibling modules in bb/fetch/).
import cvs
import git
import local
import svn
import wget
import svk
import ssh
import perforce
import bzr
import hg
import osc

# Register one instance of each fetcher. FetchData.__init__ walks this list
# and binds a URL to the FIRST method whose supports() accepts it, so the
# registration order below matters (e.g. local before wget).
methods.append(local.Local())
methods.append(wget.Wget())
methods.append(svn.Svn())
methods.append(git.Git())
methods.append(cvs.Cvs())
methods.append(svk.Svk())
methods.append(ssh.SSH())
methods.append(perforce.Perforce())
methods.append(bzr.Bzr())
methods.append(hg.Hg())
methods.append(osc.Osc())
diff --git a/bitbake-dev/lib/bb/fetch/bzr.py b/bitbake-dev/lib/bb/fetch/bzr.py deleted file mode 100644 index b27fb63d07..0000000000 --- a/bitbake-dev/lib/bb/fetch/bzr.py +++ /dev/null | |||
@@ -1,153 +0,0 @@ | |||
1 | """ | ||
2 | BitBake 'Fetch' implementation for bzr. | ||
3 | |||
4 | """ | ||
5 | |||
6 | # Copyright (C) 2007 Ross Burton | ||
7 | # Copyright (C) 2007 Richard Purdie | ||
8 | # | ||
9 | # Classes for obtaining upstream sources for the | ||
10 | # BitBake build tools. | ||
11 | # Copyright (C) 2003, 2004 Chris Larson | ||
12 | # | ||
13 | # This program is free software; you can redistribute it and/or modify | ||
14 | # it under the terms of the GNU General Public License version 2 as | ||
15 | # published by the Free Software Foundation. | ||
16 | # | ||
17 | # This program is distributed in the hope that it will be useful, | ||
18 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
19 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
20 | # GNU General Public License for more details. | ||
21 | # | ||
22 | # You should have received a copy of the GNU General Public License along | ||
23 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
24 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
25 | |||
26 | import os | ||
27 | import sys | ||
28 | import bb | ||
29 | from bb import data | ||
30 | from bb.fetch import Fetch | ||
31 | from bb.fetch import FetchError | ||
32 | from bb.fetch import runfetchcmd | ||
33 | |||
class Bzr(Fetch):
    """Fetcher for bzr (Bazaar) repositories."""

    def supports(self, url, ud, d):
        """Check to see if a given url can be fetched with bzr."""
        return ud.type in ['bzr']

    def localpath(self, url, ud, d):
        """Resolve the checkout directory/revision and the local tarball name."""

        # Create paths to bzr checkouts
        relpath = ud.path
        if relpath.startswith('/'):
            # Remove leading slash as os.path.join can't cope
            relpath = relpath[1:]
        ud.pkgdir = os.path.join(data.expand('${BZRDIR}', d), ud.host, relpath)

        revision = Fetch.srcrev_internal_helper(ud, d)
        if revision is True:
            # auto-srcrev in action: ask the SCM for the latest revision
            ud.revision = self.latest_revision(url, ud, d)
        elif revision:
            ud.revision = revision

        if not ud.revision:
            ud.revision = self.latest_revision(url, ud, d)

        ud.localfile = data.expand('bzr_%s_%s_%s.tar.gz' % (ud.host, ud.path.replace('/', '.'), ud.revision), d)

        return os.path.join(data.getVar("DL_DIR", d, True), ud.localfile)

    def _buildbzrcommand(self, ud, d, command):
        """
        Build up an bzr commandline based on ud
        command is "fetch", "update", "revno"
        """

        basecmd = data.expand('${FETCHCMD_bzr}', d)

        proto = "http"
        if "proto" in ud.parm:
            proto = ud.parm["proto"]

        bzrroot = ud.host + ud.path

        options = []

        # Fixed: compare command strings with ==, not 'is' -- identity of
        # string literals is a CPython interning detail, not a guarantee
        if command == "revno":
            bzrcmd = "%s revno %s %s://%s" % (basecmd, " ".join(options), proto, bzrroot)
        else:
            if ud.revision:
                options.append("-r %s" % ud.revision)

            if command == "fetch":
                bzrcmd = "%s co %s %s://%s" % (basecmd, " ".join(options), proto, bzrroot)
            elif command == "update":
                bzrcmd = "%s pull %s --overwrite" % (basecmd, " ".join(options))
            else:
                raise FetchError("Invalid bzr command %s" % command)

        return bzrcmd

    def go(self, loc, ud, d):
        """Fetch url"""

        # try to use the tarball stash
        if Fetch.try_mirror(d, ud.localfile):
            bb.msg.debug(1, bb.msg.domain.Fetcher, "%s already exists or was mirrored, skipping bzr checkout." % ud.localpath)
            return

        if os.access(os.path.join(ud.pkgdir, os.path.basename(ud.pkgdir), '.bzr'), os.R_OK):
            # existing checkout: pull into it
            bzrcmd = self._buildbzrcommand(ud, d, "update")
            bb.msg.debug(1, bb.msg.domain.Fetcher, "BZR Update %s" % loc)
            os.chdir(os.path.join(ud.pkgdir, os.path.basename(ud.path)))
            runfetchcmd(bzrcmd, d)
        else:
            # remove any stale checkout and do a fresh one
            os.system("rm -rf %s" % os.path.join(ud.pkgdir, os.path.basename(ud.pkgdir)))
            bzrcmd = self._buildbzrcommand(ud, d, "fetch")
            bb.msg.debug(1, bb.msg.domain.Fetcher, "BZR Checkout %s" % loc)
            bb.mkdirhier(ud.pkgdir)
            os.chdir(ud.pkgdir)
            bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % bzrcmd)
            runfetchcmd(bzrcmd, d)

        os.chdir(ud.pkgdir)
        # tar them up to a defined filename
        try:
            runfetchcmd("tar -czf %s %s" % (ud.localpath, os.path.basename(ud.pkgdir)), d)
        except:
            # Fixed: clean up the partial tarball, then re-raise with a bare
            # 'raise' instead of the Python-2-only 'raise t, v, tb' form
            try:
                os.unlink(ud.localpath)
            except OSError:
                pass
            raise

    def suppports_srcrev(self):
        # (misspelled name preserved: it overrides Fetch.suppports_srcrev)
        return True

    def _revision_key(self, url, ud, d):
        """
        Return a unique key for the url
        """
        return "bzr:" + ud.pkgdir

    def _latest_revision(self, url, ud, d):
        """
        Return the latest upstream revision number
        """
        bb.msg.debug(2, bb.msg.domain.Fetcher, "BZR fetcher hitting network for %s" % url)

        output = runfetchcmd(self._buildbzrcommand(ud, d, "revno"), d, True)

        return output.strip()

    def _sortable_revision(self, url, ud, d):
        """
        Return a sortable revision number which in our case is the revision number
        """

        return self._build_revision(url, ud, d)

    def _build_revision(self, url, ud, d):
        return ud.revision
153 | |||
diff --git a/bitbake-dev/lib/bb/fetch/cvs.py b/bitbake-dev/lib/bb/fetch/cvs.py deleted file mode 100644 index 90a006500e..0000000000 --- a/bitbake-dev/lib/bb/fetch/cvs.py +++ /dev/null | |||
@@ -1,182 +0,0 @@ | |||
1 | # ex:ts=4:sw=4:sts=4:et | ||
2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
3 | """ | ||
4 | BitBake 'Fetch' implementations | ||
5 | |||
6 | Classes for obtaining upstream sources for the | ||
7 | BitBake build tools. | ||
8 | |||
9 | """ | ||
10 | |||
11 | # Copyright (C) 2003, 2004 Chris Larson | ||
12 | # | ||
13 | # This program is free software; you can redistribute it and/or modify | ||
14 | # it under the terms of the GNU General Public License version 2 as | ||
15 | # published by the Free Software Foundation. | ||
16 | # | ||
17 | # This program is distributed in the hope that it will be useful, | ||
18 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
19 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
20 | # GNU General Public License for more details. | ||
21 | # | ||
22 | # You should have received a copy of the GNU General Public License along | ||
23 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
24 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
25 | # | ||
26 | #Based on functions from the base bb module, Copyright 2003 Holger Schurig | ||
27 | # | ||
28 | |||
29 | import os | ||
30 | import bb | ||
31 | from bb import data | ||
32 | from bb.fetch import Fetch | ||
33 | from bb.fetch import FetchError | ||
34 | from bb.fetch import MissingParameterError | ||
35 | |||
class Cvs(Fetch):
    """
    Class to fetch a module or modules from cvs repositories
    """
    def supports(self, url, ud, d):
        """
        Check to see if a given url can be fetched with cvs.
        """
        return ud.type in ['cvs']

    def localpath(self, url, ud, d):
        """Compute the local tarball name for the checkout described by ud.

        Raises MissingParameterError when the mandatory 'module' URL
        parameter is absent.
        """
        # Fixed idiom: 'not "x" in y' -> '"x" not in y'
        if "module" not in ud.parm:
            raise MissingParameterError("cvs method needs a 'module' parameter")
        ud.module = ud.parm["module"]

        ud.tag = ""
        if 'tag' in ud.parm:
            ud.tag = ud.parm['tag']

        # Override the default date in certain cases
        if 'date' in ud.parm:
            ud.date = ud.parm['date']
        elif ud.tag:
            # a tag pins the revision, so drop any date constraint
            ud.date = ""

        norecurse = ''
        if 'norecurse' in ud.parm:
            norecurse = '_norecurse'

        fullpath = ''
        if 'fullpath' in ud.parm:
            fullpath = '_fullpath'

        ud.localfile = data.expand('%s_%s_%s_%s%s%s.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.tag, ud.date, norecurse, fullpath), d)

        return os.path.join(data.getVar("DL_DIR", d, True), ud.localfile)

    def forcefetch(self, url, ud, d):
        """A date of "now" always needs refetching."""
        return ud.date == "now"

    def go(self, loc, ud, d):
        """Check out (or update) the module and tar it up into ud.localpath."""

        # try to use the tarball stash
        if not self.forcefetch(loc, ud, d) and Fetch.try_mirror(d, ud.localfile):
            bb.msg.debug(1, bb.msg.domain.Fetcher, "%s already exists or was mirrored, skipping cvs checkout." % ud.localpath)
            return

        method = "pserver"
        if "method" in ud.parm:
            method = ud.parm["method"]

        localdir = ud.module
        if "localdir" in ud.parm:
            localdir = ud.parm["localdir"]

        cvs_port = ""
        if "port" in ud.parm:
            cvs_port = ud.parm["port"]

        cvs_rsh = None
        if method == "ext":
            if "rsh" in ud.parm:
                cvs_rsh = ud.parm["rsh"]

        # Build CVSROOT; the "dir" method uses a plain path, everything else
        # gets the :method[;proxy=...]:user[:pswd]@host:port/path form
        if method == "dir":
            cvsroot = ud.path
        else:
            cvsroot = ":" + method
            cvsproxyhost = data.getVar('CVS_PROXY_HOST', d, True)
            if cvsproxyhost:
                cvsroot += ";proxy=" + cvsproxyhost
            cvsproxyport = data.getVar('CVS_PROXY_PORT', d, True)
            if cvsproxyport:
                cvsroot += ";proxyport=" + cvsproxyport
            cvsroot += ":" + ud.user
            if ud.pswd:
                cvsroot += ":" + ud.pswd
            cvsroot += "@" + ud.host + ":" + cvs_port + ud.path

        options = []
        if 'norecurse' in ud.parm:
            options.append("-l")
        if ud.date:
            # treat YYYYMMDDHHMM specially for CVS
            if len(ud.date) == 12:
                options.append("-D \"%s %s:%s UTC\"" % (ud.date[0:8], ud.date[8:10], ud.date[10:12]))
            else:
                options.append("-D \"%s UTC\"" % ud.date)
        if ud.tag:
            options.append("-r %s" % ud.tag)

        localdata = data.createCopy(d)
        data.setVar('OVERRIDES', "cvs:%s" % data.getVar('OVERRIDES', localdata), localdata)
        data.update_data(localdata)

        data.setVar('CVSROOT', cvsroot, localdata)
        data.setVar('CVSCOOPTS', " ".join(options), localdata)
        data.setVar('CVSMODULE', ud.module, localdata)
        cvscmd = data.getVar('FETCHCOMMAND', localdata, 1)
        cvsupdatecmd = data.getVar('UPDATECOMMAND', localdata, 1)

        if cvs_rsh:
            cvscmd = "CVS_RSH=\"%s\" %s" % (cvs_rsh, cvscmd)
            cvsupdatecmd = "CVS_RSH=\"%s\" %s" % (cvs_rsh, cvsupdatecmd)

        # create module directory
        bb.msg.debug(2, bb.msg.domain.Fetcher, "Fetch: checking for module directory")
        pkg = data.expand('${PN}', d)
        pkgdir = os.path.join(data.expand('${CVSDIR}', localdata), pkg)
        moddir = os.path.join(pkgdir, localdir)
        if os.access(os.path.join(moddir, 'CVS'), os.R_OK):
            bb.msg.note(1, bb.msg.domain.Fetcher, "Update " + loc)
            # update sources there
            os.chdir(moddir)
            myret = os.system(cvsupdatecmd)
        else:
            bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc)
            # check out sources there
            bb.mkdirhier(pkgdir)
            os.chdir(pkgdir)
            bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % cvscmd)
            myret = os.system(cvscmd)

        if myret != 0 or not os.access(moddir, os.R_OK):
            try:
                os.rmdir(moddir)
            except OSError:
                pass
            raise FetchError(ud.module)

        # tar them up to a defined filename
        if 'fullpath' in ud.parm:
            os.chdir(pkgdir)
            myret = os.system("tar -czf %s %s" % (ud.localpath, localdir))
        else:
            os.chdir(moddir)
            os.chdir('..')
            myret = os.system("tar -czf %s %s" % (ud.localpath, os.path.basename(moddir)))

        if myret != 0:
            # don't leave a partial tarball behind
            try:
                os.unlink(ud.localpath)
            except OSError:
                pass
            raise FetchError(ud.module)
diff --git a/bitbake-dev/lib/bb/fetch/git.py b/bitbake-dev/lib/bb/fetch/git.py deleted file mode 100644 index 0e68325db9..0000000000 --- a/bitbake-dev/lib/bb/fetch/git.py +++ /dev/null | |||
@@ -1,216 +0,0 @@ | |||
1 | # ex:ts=4:sw=4:sts=4:et | ||
2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
3 | """ | ||
4 | BitBake 'Fetch' git implementation | ||
5 | |||
6 | """ | ||
7 | |||
8 | #Copyright (C) 2005 Richard Purdie | ||
9 | # | ||
10 | # This program is free software; you can redistribute it and/or modify | ||
11 | # it under the terms of the GNU General Public License version 2 as | ||
12 | # published by the Free Software Foundation. | ||
13 | # | ||
14 | # This program is distributed in the hope that it will be useful, | ||
15 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
16 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
17 | # GNU General Public License for more details. | ||
18 | # | ||
19 | # You should have received a copy of the GNU General Public License along | ||
20 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
21 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
22 | |||
23 | import os | ||
24 | import bb | ||
25 | from bb import data | ||
26 | from bb.fetch import Fetch | ||
27 | from bb.fetch import runfetchcmd | ||
28 | |||
class Git(Fetch):
    """Class to fetch a module or modules from git repositories"""

    def init(self, d):
        #
        # Only enable _sortable revision if the key is set
        #
        if bb.data.getVar("BB_GIT_CLONE_FOR_SRCREV", d, True):
            self._sortable_buildindex = self._sortable_buildindex_disabled

    def supports(self, url, ud, d):
        """
        Check to see if a given url can be fetched with git.
        """
        return ud.type in ['git']

    def localpath(self, url, ud, d):
        """Resolve protocol, branch, clone directory, revision and tarball name."""

        if 'protocol' in ud.parm:
            ud.proto = ud.parm['protocol']
        elif not ud.host:
            # no host means a local repository
            ud.proto = 'file'
        else:
            ud.proto = "rsync"

        ud.branch = ud.parm.get("branch", "master")

        gitsrcname = '%s%s' % (ud.host, ud.path.replace('/', '.'))
        ud.mirrortarball = 'git_%s.tar.gz' % (gitsrcname)
        ud.clonedir = os.path.join(data.expand('${GITDIR}', d), gitsrcname)

        tag = Fetch.srcrev_internal_helper(ud, d)
        if tag is True:
            # auto-srcrev in action: resolve the latest revision
            ud.tag = self.latest_revision(url, ud, d)
        elif tag:
            ud.tag = tag

        if not ud.tag or ud.tag == "master":
            ud.tag = self.latest_revision(url, ud, d)

        subdir = ud.parm.get("subpath", "")
        if subdir != "":
            if subdir.endswith("/"):
                subdir = subdir[:-1]
            subdirpath = os.path.join(ud.path, subdir)
        else:
            subdirpath = ud.path

        if 'fullclone' in ud.parm:
            ud.localfile = ud.mirrortarball
        else:
            ud.localfile = data.expand('git_%s%s_%s.tar.gz' % (ud.host, subdirpath.replace('/', '.'), ud.tag), d)

        return os.path.join(data.getVar("DL_DIR", d, True), ud.localfile)

    def go(self, loc, ud, d):
        """Fetch url"""

        if Fetch.try_mirror(d, ud.localfile):
            bb.msg.debug(1, bb.msg.domain.Fetcher, "%s already exists (or was stashed). Skipping git checkout." % ud.localpath)
            return

        if ud.user:
            username = ud.user + '@'
        else:
            username = ""

        repofile = os.path.join(data.getVar("DL_DIR", d, 1), ud.mirrortarball)

        coname = '%s' % (ud.tag)
        codir = os.path.join(ud.clonedir, coname)

        if not os.path.exists(ud.clonedir):
            # prefer a stashed mirror tarball over a fresh clone
            if Fetch.try_mirror(d, ud.mirrortarball):
                bb.mkdirhier(ud.clonedir)
                os.chdir(ud.clonedir)
                runfetchcmd("tar -xzf %s" % (repofile), d)
            else:
                runfetchcmd("git clone -n %s://%s%s%s %s" % (ud.proto, username, ud.host, ud.path, ud.clonedir), d)

        os.chdir(ud.clonedir)
        # Remove all but the .git directory
        if not self._contains_ref(ud.tag, d):
            runfetchcmd("rm * -Rf", d)
            runfetchcmd("git fetch %s://%s%s%s %s" % (ud.proto, username, ud.host, ud.path, ud.branch), d)
            runfetchcmd("git fetch --tags %s://%s%s%s" % (ud.proto, username, ud.host, ud.path), d)
            runfetchcmd("git prune-packed", d)
            runfetchcmd("git pack-redundant --all | xargs -r rm", d)

        os.chdir(ud.clonedir)
        mirror_tarballs = data.getVar("BB_GENERATE_MIRROR_TARBALLS", d, True)
        if mirror_tarballs != "0" or 'fullclone' in ud.parm:
            bb.msg.note(1, bb.msg.domain.Fetcher, "Creating tarball of git repository")
            runfetchcmd("tar -czf %s %s" % (repofile, os.path.join(".", ".git", "*")), d)

        if 'fullclone' in ud.parm:
            return

        if os.path.exists(codir):
            bb.utils.prunedir(codir)

        subdir = ud.parm.get("subpath", "")
        if subdir != "":
            if subdir.endswith("/"):
                subdirbase = os.path.basename(subdir[:-1])
            else:
                subdirbase = os.path.basename(subdir)
        else:
            subdirbase = ""

        if subdir != "":
            readpathspec = ":%s" % (subdir)
            codir = os.path.join(codir, "git")
            coprefix = os.path.join(codir, subdirbase, "")
        else:
            readpathspec = ""
            coprefix = os.path.join(codir, "git", "")

        bb.mkdirhier(codir)
        os.chdir(ud.clonedir)
        runfetchcmd("git read-tree %s%s" % (ud.tag, readpathspec), d)
        runfetchcmd("git checkout-index -q -f --prefix=%s -a" % (coprefix), d)

        os.chdir(codir)
        bb.msg.note(1, bb.msg.domain.Fetcher, "Creating tarball of git checkout")
        runfetchcmd("tar -czf %s %s" % (ud.localpath, os.path.join(".", "*")), d)

        os.chdir(ud.clonedir)
        bb.utils.prunedir(codir)

    def suppports_srcrev(self):
        # (misspelled name preserved: it overrides Fetch.suppports_srcrev)
        return True

    def _contains_ref(self, tag, d):
        """Return True if the current clone already contains the given ref."""
        output = runfetchcmd("git log --pretty=oneline -n 1 %s -- 2> /dev/null | wc -l" % tag, d, quiet=True)
        return output.split()[0] != "0"

    def _revision_key(self, url, ud, d):
        """
        Return a unique key for the url
        """
        return "git:" + ud.host + ud.path.replace('/', '.')

    def _latest_revision(self, url, ud, d):
        """
        Compute the HEAD revision for the url
        """
        if ud.user:
            username = ud.user + '@'
        else:
            username = ""

        cmd = "git ls-remote %s://%s%s%s %s" % (ud.proto, username, ud.host, ud.path, ud.branch)
        output = runfetchcmd(cmd, d, True)
        if not output:
            raise bb.fetch.FetchError("Fetch command %s gave empty output\n" % (cmd))
        return output.split()[0]

    def _build_revision(self, url, ud, d):
        return ud.tag

    def _sortable_buildindex_disabled(self, url, ud, d, rev):
        """
        Return a suitable buildindex for the revision specified. This is done by counting revisions
        using "git rev-list" which may or may not work in different circumstances.
        """

        cwd = os.getcwd()

        # Check if we have the rev already

        if not os.path.exists(ud.clonedir):
            # Fixed: replace a stray Python 2 'print "no repo"' debug
            # statement with a proper fetcher debug message
            bb.msg.debug(2, bb.msg.domain.Fetcher, "GIT repository for %s not present in %s, cloning" % (url, ud.clonedir))
            self.go(None, ud, d)
            if not os.path.exists(ud.clonedir):
                bb.msg.error(bb.msg.domain.Fetcher, "GIT repository for %s doesn't exist in %s, cannot get sortable buildnumber, using old value" % (url, ud.clonedir))
                return None

        os.chdir(ud.clonedir)
        if not self._contains_ref(rev, d):
            self.go(None, ud, d)

        output = runfetchcmd("git rev-list %s -- 2> /dev/null | wc -l" % rev, d, quiet=True)
        os.chdir(cwd)

        buildindex = "%s" % output.split()[0]
        # Fixed: this message referenced the undefined name 'repodir'
        # (a NameError at runtime); use ud.clonedir instead
        bb.msg.debug(1, bb.msg.domain.Fetcher, "GIT repository for %s in %s is returning %s revisions in rev-list before %s" % (url, ud.clonedir, buildindex, rev))
        return buildindex
216 | |||
diff --git a/bitbake-dev/lib/bb/fetch/hg.py b/bitbake-dev/lib/bb/fetch/hg.py deleted file mode 100644 index 08cb61fc28..0000000000 --- a/bitbake-dev/lib/bb/fetch/hg.py +++ /dev/null | |||
@@ -1,178 +0,0 @@ | |||
1 | # ex:ts=4:sw=4:sts=4:et | ||
2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
3 | """ | ||
4 | BitBake 'Fetch' implementation for mercurial DRCS (hg). | ||
5 | |||
6 | """ | ||
7 | |||
8 | # Copyright (C) 2003, 2004 Chris Larson | ||
9 | # Copyright (C) 2004 Marcin Juszkiewicz | ||
10 | # Copyright (C) 2007 Robert Schuster | ||
11 | # | ||
12 | # This program is free software; you can redistribute it and/or modify | ||
13 | # it under the terms of the GNU General Public License version 2 as | ||
14 | # published by the Free Software Foundation. | ||
15 | # | ||
16 | # This program is distributed in the hope that it will be useful, | ||
17 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
19 | # GNU General Public License for more details. | ||
20 | # | ||
21 | # You should have received a copy of the GNU General Public License along | ||
22 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
23 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
24 | # | ||
25 | # Based on functions from the base bb module, Copyright 2003 Holger Schurig | ||
26 | |||
27 | import os | ||
28 | import sys | ||
29 | import bb | ||
30 | from bb import data | ||
31 | from bb.fetch import Fetch | ||
32 | from bb.fetch import FetchError | ||
33 | from bb.fetch import MissingParameterError | ||
34 | from bb.fetch import runfetchcmd | ||
35 | |||
36 | class Hg(Fetch): | ||
37 | """Class to fetch a from mercurial repositories""" | ||
38 | def supports(self, url, ud, d): | ||
39 | """ | ||
40 | Check to see if a given url can be fetched with mercurial. | ||
41 | """ | ||
42 | return ud.type in ['hg'] | ||
43 | |||
44 | def localpath(self, url, ud, d): | ||
45 | if not "module" in ud.parm: | ||
46 | raise MissingParameterError("hg method needs a 'module' parameter") | ||
47 | |||
48 | ud.module = ud.parm["module"] | ||
49 | |||
50 | # Create paths to mercurial checkouts | ||
51 | relpath = ud.path | ||
52 | if relpath.startswith('/'): | ||
53 | # Remove leading slash as os.path.join can't cope | ||
54 | relpath = relpath[1:] | ||
55 | ud.pkgdir = os.path.join(data.expand('${HGDIR}', d), ud.host, relpath) | ||
56 | ud.moddir = os.path.join(ud.pkgdir, ud.module) | ||
57 | |||
58 | if 'rev' in ud.parm: | ||
59 | ud.revision = ud.parm['rev'] | ||
60 | else: | ||
61 | tag = Fetch.srcrev_internal_helper(ud, d) | ||
62 | if tag is True: | ||
63 | ud.revision = self.latest_revision(url, ud, d) | ||
64 | elif tag: | ||
65 | ud.revision = tag | ||
66 | else: | ||
67 | ud.revision = self.latest_revision(url, ud, d) | ||
68 | |||
69 | ud.localfile = data.expand('%s_%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.path.replace('/', '.'), ud.revision), d) | ||
70 | |||
71 | return os.path.join(data.getVar("DL_DIR", d, True), ud.localfile) | ||
72 | |||
73 | def _buildhgcommand(self, ud, d, command): | ||
74 | """ | ||
75 | Build up an hg commandline based on ud | ||
76 | command is "fetch", "update", "info" | ||
77 | """ | ||
78 | |||
79 | basecmd = data.expand('${FETCHCMD_hg}', d) | ||
80 | |||
81 | proto = "http" | ||
82 | if "proto" in ud.parm: | ||
83 | proto = ud.parm["proto"] | ||
84 | |||
85 | host = ud.host | ||
86 | if proto == "file": | ||
87 | host = "/" | ||
88 | ud.host = "localhost" | ||
89 | |||
90 | if not ud.user: | ||
91 | hgroot = host + ud.path | ||
92 | else: | ||
93 | hgroot = ud.user + "@" + host + ud.path | ||
94 | |||
95 | if command is "info": | ||
96 | return "%s identify -i %s://%s/%s" % (basecmd, proto, hgroot, ud.module) | ||
97 | |||
98 | options = []; | ||
99 | if ud.revision: | ||
100 | options.append("-r %s" % ud.revision) | ||
101 | |||
102 | if command is "fetch": | ||
103 | cmd = "%s clone %s %s://%s/%s %s" % (basecmd, " ".join(options), proto, hgroot, ud.module, ud.module) | ||
104 | elif command is "pull": | ||
105 | # do not pass options list; limiting pull to rev causes the local | ||
106 | # repo not to contain it and immediately following "update" command | ||
107 | # will crash | ||
108 | cmd = "%s pull" % (basecmd) | ||
109 | elif command is "update": | ||
110 | cmd = "%s update -C %s" % (basecmd, " ".join(options)) | ||
111 | else: | ||
112 | raise FetchError("Invalid hg command %s" % command) | ||
113 | |||
114 | return cmd | ||
115 | |||
116 | def go(self, loc, ud, d): | ||
117 | """Fetch url""" | ||
118 | |||
119 | # try to use the tarball stash | ||
120 | if Fetch.try_mirror(d, ud.localfile): | ||
121 | bb.msg.debug(1, bb.msg.domain.Fetcher, "%s already exists or was mirrored, skipping hg checkout." % ud.localpath) | ||
122 | return | ||
123 | |||
124 | bb.msg.debug(2, bb.msg.domain.Fetcher, "Fetch: checking for module directory '" + ud.moddir + "'") | ||
125 | |||
126 | if os.access(os.path.join(ud.moddir, '.hg'), os.R_OK): | ||
127 | updatecmd = self._buildhgcommand(ud, d, "pull") | ||
128 | bb.msg.note(1, bb.msg.domain.Fetcher, "Update " + loc) | ||
129 | # update sources there | ||
130 | os.chdir(ud.moddir) | ||
131 | bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % updatecmd) | ||
132 | runfetchcmd(updatecmd, d) | ||
133 | |||
134 | else: | ||
135 | fetchcmd = self._buildhgcommand(ud, d, "fetch") | ||
136 | bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc) | ||
137 | # check out sources there | ||
138 | bb.mkdirhier(ud.pkgdir) | ||
139 | os.chdir(ud.pkgdir) | ||
140 | bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % fetchcmd) | ||
141 | runfetchcmd(fetchcmd, d) | ||
142 | |||
143 | # Even when we clone (fetch), we still need to update as hg's clone | ||
144 | # won't checkout the specified revision if its on a branch | ||
145 | updatecmd = self._buildhgcommand(ud, d, "update") | ||
146 | bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % updatecmd) | ||
147 | runfetchcmd(updatecmd, d) | ||
148 | |||
149 | os.chdir(ud.pkgdir) | ||
150 | try: | ||
151 | runfetchcmd("tar -czf %s %s" % (ud.localpath, ud.module), d) | ||
152 | except: | ||
153 | t, v, tb = sys.exc_info() | ||
154 | try: | ||
155 | os.unlink(ud.localpath) | ||
156 | except OSError: | ||
157 | pass | ||
158 | raise t, v, tb | ||
159 | |||
160 | def suppports_srcrev(self): | ||
161 | return True | ||
162 | |||
163 | def _latest_revision(self, url, ud, d): | ||
164 | """ | ||
165 | Compute tip revision for the url | ||
166 | """ | ||
167 | output = runfetchcmd(self._buildhgcommand(ud, d, "info"), d) | ||
168 | return output.strip() | ||
169 | |||
170 | def _build_revision(self, url, ud, d): | ||
171 | return ud.revision | ||
172 | |||
173 | def _revision_key(self, url, ud, d): | ||
174 | """ | ||
175 | Return a unique key for the url | ||
176 | """ | ||
177 | return "hg:" + ud.moddir | ||
178 | |||
diff --git a/bitbake-dev/lib/bb/fetch/local.py b/bitbake-dev/lib/bb/fetch/local.py deleted file mode 100644 index f9bdf589cb..0000000000 --- a/bitbake-dev/lib/bb/fetch/local.py +++ /dev/null | |||
@@ -1,72 +0,0 @@ | |||
1 | # ex:ts=4:sw=4:sts=4:et | ||
2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
3 | """ | ||
4 | BitBake 'Fetch' implementations | ||
5 | |||
6 | Classes for obtaining upstream sources for the | ||
7 | BitBake build tools. | ||
8 | |||
9 | """ | ||
10 | |||
11 | # Copyright (C) 2003, 2004 Chris Larson | ||
12 | # | ||
13 | # This program is free software; you can redistribute it and/or modify | ||
14 | # it under the terms of the GNU General Public License version 2 as | ||
15 | # published by the Free Software Foundation. | ||
16 | # | ||
17 | # This program is distributed in the hope that it will be useful, | ||
18 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
19 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
20 | # GNU General Public License for more details. | ||
21 | # | ||
22 | # You should have received a copy of the GNU General Public License along | ||
23 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
24 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
25 | # | ||
26 | # Based on functions from the base bb module, Copyright 2003 Holger Schurig | ||
27 | |||
28 | import os | ||
29 | import bb | ||
30 | from bb import data | ||
31 | from bb.fetch import Fetch | ||
32 | |||
class Local(Fetch):
    """Fetcher for file:// urls that already exist on the local machine."""

    def supports(self, url, urldata, d):
        """
        Check to see if a given url represents a local fetch.
        """
        return urldata.type in ['file']

    def localpath(self, url, urldata, d):
        """
        Return the local filename of a given url assuming a successful fetch.
        """
        path = url.split("://")[1].split(";")[0]
        newpath = path
        if path[0] != "/":
            # Relative path: resolve against FILESPATH, then FILESDIR
            filespath = data.getVar('FILESPATH', d, 1)
            if filespath:
                newpath = bb.which(filespath, path)
            if not newpath:
                filesdir = data.getVar('FILESDIR', d, 1)
                if filesdir:
                    newpath = os.path.join(filesdir, path)
        # We don't set localfile as for this fetcher the file is already local!
        return newpath

    def go(self, url, urldata, d):
        """Fetch urls (no-op for Local method)"""
        # no need to fetch local files, we'll deal with them in place.
        return 1

    def checkstatus(self, url, urldata, d):
        """
        Check the status of the url
        """
        if "*" in urldata.localpath:
            bb.msg.note(1, bb.msg.domain.Fetcher, "URL %s looks like a glob and was therefore not checked." % url)
            return True
        return os.path.exists(urldata.localpath)
diff --git a/bitbake-dev/lib/bb/fetch/osc.py b/bitbake-dev/lib/bb/fetch/osc.py deleted file mode 100644 index 2c34caf6c9..0000000000 --- a/bitbake-dev/lib/bb/fetch/osc.py +++ /dev/null | |||
@@ -1,155 +0,0 @@ | |||
1 | # ex:ts=4:sw=4:sts=4:et | ||
2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
3 | """ | ||
4 | Bitbake "Fetch" implementation for osc (Opensuse build service client). | ||
5 | Based on the svn "Fetch" implementation. | ||
6 | |||
7 | """ | ||
8 | |||
9 | import os | ||
10 | import sys | ||
11 | import bb | ||
12 | from bb import data | ||
13 | from bb.fetch import Fetch | ||
14 | from bb.fetch import FetchError | ||
15 | from bb.fetch import MissingParameterError | ||
16 | from bb.fetch import runfetchcmd | ||
17 | |||
18 | class Osc(Fetch): | ||
19 | """Class to fetch a module or modules from Opensuse build server | ||
20 | repositories.""" | ||
21 | |||
22 | def supports(self, url, ud, d): | ||
23 | """ | ||
24 | Check to see if a given url can be fetched with osc. | ||
25 | """ | ||
26 | return ud.type in ['osc'] | ||
27 | |||
28 | def localpath(self, url, ud, d): | ||
29 | if not "module" in ud.parm: | ||
30 | raise MissingParameterError("osc method needs a 'module' parameter.") | ||
31 | |||
32 | ud.module = ud.parm["module"] | ||
33 | |||
34 | # Create paths to osc checkouts | ||
35 | relpath = ud.path | ||
36 | if relpath.startswith('/'): | ||
37 | # Remove leading slash as os.path.join can't cope | ||
38 | relpath = relpath[1:] | ||
39 | ud.pkgdir = os.path.join(data.expand('${OSCDIR}', d), ud.host) | ||
40 | ud.moddir = os.path.join(ud.pkgdir, relpath, ud.module) | ||
41 | |||
42 | if 'rev' in ud.parm: | ||
43 | ud.revision = ud.parm['rev'] | ||
44 | else: | ||
45 | pv = data.getVar("PV", d, 0) | ||
46 | rev = Fetch.srcrev_internal_helper(ud, d) | ||
47 | if rev and rev != True: | ||
48 | ud.revision = rev | ||
49 | else: | ||
50 | ud.revision = "" | ||
51 | |||
52 | ud.localfile = data.expand('%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.path.replace('/', '.'), ud.revision), d) | ||
53 | |||
54 | return os.path.join(data.getVar("DL_DIR", d, True), ud.localfile) | ||
55 | |||
56 | def _buildosccommand(self, ud, d, command): | ||
57 | """ | ||
58 | Build up an ocs commandline based on ud | ||
59 | command is "fetch", "update", "info" | ||
60 | """ | ||
61 | |||
62 | basecmd = data.expand('${FETCHCMD_osc}', d) | ||
63 | |||
64 | proto = "ocs" | ||
65 | if "proto" in ud.parm: | ||
66 | proto = ud.parm["proto"] | ||
67 | |||
68 | options = [] | ||
69 | |||
70 | config = "-c %s" % self.generate_config(ud, d) | ||
71 | |||
72 | if ud.revision: | ||
73 | options.append("-r %s" % ud.revision) | ||
74 | |||
75 | coroot = ud.path | ||
76 | if coroot.startswith('/'): | ||
77 | # Remove leading slash as os.path.join can't cope | ||
78 | coroot= coroot[1:] | ||
79 | |||
80 | if command is "fetch": | ||
81 | osccmd = "%s %s co %s/%s %s" % (basecmd, config, coroot, ud.module, " ".join(options)) | ||
82 | elif command is "update": | ||
83 | osccmd = "%s %s up %s" % (basecmd, config, " ".join(options)) | ||
84 | else: | ||
85 | raise FetchError("Invalid osc command %s" % command) | ||
86 | |||
87 | return osccmd | ||
88 | |||
89 | def go(self, loc, ud, d): | ||
90 | """ | ||
91 | Fetch url | ||
92 | """ | ||
93 | |||
94 | # Try to use the tarball stash | ||
95 | if Fetch.try_mirror(d, ud.localfile): | ||
96 | bb.msg.debug(1, bb.msg.domain.Fetcher, "%s already exists or was mirrored, skipping osc checkout." % ud.localpath) | ||
97 | return | ||
98 | |||
99 | bb.msg.debug(2, bb.msg.domain.Fetcher, "Fetch: checking for module directory '" + ud.moddir + "'") | ||
100 | |||
101 | if os.access(os.path.join(data.expand('${OSCDIR}', d), ud.path, ud.module), os.R_OK): | ||
102 | oscupdatecmd = self._buildosccommand(ud, d, "update") | ||
103 | bb.msg.note(1, bb.msg.domain.Fetcher, "Update "+ loc) | ||
104 | # update sources there | ||
105 | os.chdir(ud.moddir) | ||
106 | bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % oscupdatecmd) | ||
107 | runfetchcmd(oscupdatecmd, d) | ||
108 | else: | ||
109 | oscfetchcmd = self._buildosccommand(ud, d, "fetch") | ||
110 | bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc) | ||
111 | # check out sources there | ||
112 | bb.mkdirhier(ud.pkgdir) | ||
113 | os.chdir(ud.pkgdir) | ||
114 | bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % oscfetchcmd) | ||
115 | runfetchcmd(oscfetchcmd, d) | ||
116 | |||
117 | os.chdir(os.path.join(ud.pkgdir + ud.path)) | ||
118 | # tar them up to a defined filename | ||
119 | try: | ||
120 | runfetchcmd("tar -czf %s %s" % (ud.localpath, ud.module), d) | ||
121 | except: | ||
122 | t, v, tb = sys.exc_info() | ||
123 | try: | ||
124 | os.unlink(ud.localpath) | ||
125 | except OSError: | ||
126 | pass | ||
127 | raise t, v, tb | ||
128 | |||
129 | def supports_srcrev(self): | ||
130 | return False | ||
131 | |||
132 | def generate_config(self, ud, d): | ||
133 | """ | ||
134 | Generate a .oscrc to be used for this run. | ||
135 | """ | ||
136 | |||
137 | config_path = "%s/oscrc" % data.expand('${OSCDIR}', d) | ||
138 | if (os.path.exists(config_path)): | ||
139 | os.remove(config_path) | ||
140 | |||
141 | f = open(config_path, 'w') | ||
142 | f.write("[general]\n") | ||
143 | f.write("apisrv = %s\n" % ud.host) | ||
144 | f.write("scheme = http\n") | ||
145 | f.write("su-wrapper = su -c\n") | ||
146 | f.write("build-root = %s\n" % data.expand('${WORKDIR}', d)) | ||
147 | f.write("urllist = http://moblin-obs.jf.intel.com:8888/build/%(project)s/%(repository)s/%(buildarch)s/:full/%(name)s.rpm\n") | ||
148 | f.write("extra-pkgs = gzip\n") | ||
149 | f.write("\n") | ||
150 | f.write("[%s]\n" % ud.host) | ||
151 | f.write("user = %s\n" % ud.parm["user"]) | ||
152 | f.write("pass = %s\n" % ud.parm["pswd"]) | ||
153 | f.close() | ||
154 | |||
155 | return config_path | ||
diff --git a/bitbake-dev/lib/bb/fetch/perforce.py b/bitbake-dev/lib/bb/fetch/perforce.py deleted file mode 100644 index 394f5a2253..0000000000 --- a/bitbake-dev/lib/bb/fetch/perforce.py +++ /dev/null | |||
@@ -1,214 +0,0 @@ | |||
1 | # ex:ts=4:sw=4:sts=4:et | ||
2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
3 | """ | ||
4 | BitBake 'Fetch' implementations | ||
5 | |||
6 | Classes for obtaining upstream sources for the | ||
7 | BitBake build tools. | ||
8 | |||
9 | """ | ||
10 | |||
11 | # Copyright (C) 2003, 2004 Chris Larson | ||
12 | # | ||
13 | # This program is free software; you can redistribute it and/or modify | ||
14 | # it under the terms of the GNU General Public License version 2 as | ||
15 | # published by the Free Software Foundation. | ||
16 | # | ||
17 | # This program is distributed in the hope that it will be useful, | ||
18 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
19 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
20 | # GNU General Public License for more details. | ||
21 | # | ||
22 | # You should have received a copy of the GNU General Public License along | ||
23 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
24 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
25 | # | ||
26 | # Based on functions from the base bb module, Copyright 2003 Holger Schurig | ||
27 | |||
28 | import os | ||
29 | import bb | ||
30 | from bb import data | ||
31 | from bb.fetch import Fetch | ||
32 | from bb.fetch import FetchError | ||
33 | |||
class Perforce(Fetch):
    """Class to fetch sources from a Perforce (p4) depot."""

    def supports(self, url, ud, d):
        """Check to see if a given url can be fetched with perforce."""
        return ud.type in ['p4']

    def doparse(url, d):
        """
        Split a p4 url into (host, path, user, pswd, parm).

        Credentials may be embedded as user:pswd:host:port@path; otherwise
        host and port come from P4PORT. Any ;key=value suffixes are
        collected into parm, and parm["cset"] is filled in with the
        changeset to fetch.
        """
        parm = {}
        path = url.split("://")[1]
        delim = path.find("@")
        if delim != -1:
            (user, pswd, host, port) = path.split('@')[0].split(":")
            path = path.split('@')[1]
        else:
            (host, port) = data.getVar('P4PORT', d).split(':')
            user = ""
            pswd = ""

        if path.find(";") != -1:
            keys = []
            values = []
            plist = path.split(';')
            for item in plist:
                if item.count('='):
                    (key, value) = item.split('=')
                    keys.append(key)
                    values.append(value)

            parm = dict(zip(keys, values))
        path = "//" + path.split(';')[0]
        host += ":%s" % (port)
        parm["cset"] = Perforce.getcset(d, path, host, user, pswd, parm)

        return host, path, user, pswd, parm
    doparse = staticmethod(doparse)

    def getcset(d, depot, host, user, pswd, parm):
        """
        Return the changeset to fetch for the given depot path, as a string.

        Honours an explicit 'cset' parameter, then 'revision'/'label'
        parameters or P4DATE; otherwise asks p4 for the latest change.
        Returns -1 when p4 gives no answer.
        """
        p4opt = ""
        if "cset" in parm:
            return parm["cset"]
        if user:
            p4opt += " -u %s" % (user)
        if pswd:
            p4opt += " -P %s" % (pswd)
        if host:
            p4opt += " -p %s" % (host)

        p4date = data.getVar("P4DATE", d, 1)
        if "revision" in parm:
            depot += "#%s" % (parm["revision"])
        elif "label" in parm:
            depot += "@%s" % (parm["label"])
        elif p4date:
            depot += "@%s" % (p4date)

        p4cmd = data.getVar('FETCHCOMMAND_p4', d, 1)
        bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s%s changes -m 1 %s" % (p4cmd, p4opt, depot))
        p4file = os.popen("%s%s changes -m 1 %s" % (p4cmd, p4opt, depot))
        cset = p4file.readline().strip()
        bb.msg.debug(1, bb.msg.domain.Fetcher, "READ %s" % (cset))
        if not cset:
            return -1

        # Output looks like "Change <num> on <date> ..."; take the number
        return cset.split(' ')[1]
    getcset = staticmethod(getcset)

    def localpath(self, url, ud, d):
        """Return the local tarball path for this url."""

        (host, path, user, pswd, parm) = Perforce.doparse(url, d)

        # If a label is specified, we use that as our filename

        if "label" in parm:
            ud.localfile = "%s.tar.gz" % (parm["label"])
            return os.path.join(data.getVar("DL_DIR", d, 1), ud.localfile)

        base = path
        wildcard = path.find('/...')
        if wildcard != -1:
            base = path[:wildcard]

        if base[0] == "/":
            base = base[1:]

        cset = Perforce.getcset(d, path, host, user, pswd, parm)

        ud.localfile = data.expand('%s+%s+%s.tar.gz' % (host, base.replace('/', '.'), cset), d)

        return os.path.join(data.getVar("DL_DIR", d, 1), ud.localfile)

    def go(self, loc, ud, d):
        """
        Fetch urls
        """

        # try to use the tarball stash
        if Fetch.try_mirror(d, ud.localfile):
            bb.msg.debug(1, bb.msg.domain.Fetcher, "%s already exists or was mirrored, skipping perforce checkout." % ud.localpath)
            return

        (host, depot, user, pswd, parm) = Perforce.doparse(loc, d)

        if depot.find('/...') != -1:
            path = depot[:depot.find('/...')]
        else:
            path = depot

        if "module" in parm:
            module = parm["module"]
        else:
            module = os.path.basename(path)

        localdata = data.createCopy(d)
        data.setVar('OVERRIDES', "p4:%s" % data.getVar('OVERRIDES', localdata), localdata)
        data.update_data(localdata)

        # Get the p4 command
        p4opt = ""
        if user:
            p4opt += " -u %s" % (user)

        if pswd:
            p4opt += " -P %s" % (pswd)

        if host:
            p4opt += " -p %s" % (host)

        p4cmd = data.getVar('FETCHCOMMAND', localdata, 1)

        # create temp directory
        bb.msg.debug(2, bb.msg.domain.Fetcher, "Fetch: creating temporary directory")
        bb.mkdirhier(data.expand('${WORKDIR}', localdata))
        data.setVar('TMPBASE', data.expand('${WORKDIR}/oep4.XXXXXX', localdata), localdata)
        tmppipe = os.popen(data.getVar('MKTEMPDIRCMD', localdata, 1) or "false")
        tmpfile = tmppipe.readline().strip()
        if not tmpfile:
            bb.error("Fetch: unable to create temporary directory.. make sure 'mktemp' is in the PATH.")
            raise FetchError(module)

        if "label" in parm:
            depot = "%s@%s" % (depot, parm["label"])
        else:
            cset = Perforce.getcset(d, depot, host, user, pswd, parm)
            depot = "%s@%s" % (depot, cset)

        os.chdir(tmpfile)
        bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc)
        bb.msg.note(1, bb.msg.domain.Fetcher, "%s%s files %s" % (p4cmd, p4opt, depot))
        p4file = os.popen("%s%s files %s" % (p4cmd, p4opt, depot))

        if not p4file:
            bb.error("Fetch: unable to get the P4 files from %s" % (depot))
            raise FetchError(module)

        count = 0

        # Idiom fix: the original loop shadowed the 'file' and 'list' builtins
        for p4line in p4file:
            fields = p4line.split()

            if fields[2] == "delete":
                continue

            dest = fields[0][len(path)+1:]
            where = dest.find("#")

            os.system("%s%s print -o %s/%s %s" % (p4cmd, p4opt, module, dest[:where], fields[0]))
            count = count + 1

        if count == 0:
            bb.error("Fetch: No files gathered from the P4 fetch")
            raise FetchError(module)

        myret = os.system("tar -czf %s %s" % (ud.localpath, module))
        if myret != 0:
            try:
                os.unlink(ud.localpath)
            except OSError:
                pass
            raise FetchError(module)
        # cleanup
        os.system('rm -rf %s' % tmpfile)
diff --git a/bitbake-dev/lib/bb/fetch/ssh.py b/bitbake-dev/lib/bb/fetch/ssh.py deleted file mode 100644 index 68e6fdb1df..0000000000 --- a/bitbake-dev/lib/bb/fetch/ssh.py +++ /dev/null | |||
@@ -1,118 +0,0 @@ | |||
1 | # ex:ts=4:sw=4:sts=4:et | ||
2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
3 | ''' | ||
4 | BitBake 'Fetch' implementations | ||
5 | |||
6 | This implementation is for Secure Shell (SSH), and attempts to comply with the | ||
7 | IETF secsh internet draft: | ||
8 | http://tools.ietf.org/wg/secsh/draft-ietf-secsh-scp-sftp-ssh-uri/ | ||
9 | |||
10 | Currently does not support the sftp parameters, as this uses scp | ||
11 | Also does not support the 'fingerprint' connection parameter. | ||
12 | |||
13 | ''' | ||
14 | |||
15 | # Copyright (C) 2006 OpenedHand Ltd. | ||
16 | # | ||
17 | # | ||
18 | # Based in part on svk.py: | ||
19 | # Copyright (C) 2006 Holger Hans Peter Freyther | ||
20 | # Based on svn.py: | ||
21 | # Copyright (C) 2003, 2004 Chris Larson | ||
22 | # Based on functions from the base bb module: | ||
23 | # Copyright 2003 Holger Schurig | ||
24 | # | ||
25 | # | ||
26 | # This program is free software; you can redistribute it and/or modify | ||
27 | # it under the terms of the GNU General Public License version 2 as | ||
28 | # published by the Free Software Foundation. | ||
29 | # | ||
30 | # This program is distributed in the hope that it will be useful, | ||
31 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
32 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
33 | # GNU General Public License for more details. | ||
34 | # | ||
35 | # You should have received a copy of the GNU General Public License along | ||
36 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
37 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
38 | |||
39 | import re, os | ||
40 | from bb import data | ||
41 | from bb.fetch import Fetch | ||
42 | from bb.fetch import FetchError | ||
43 | |||
44 | |||
45 | __pattern__ = re.compile(r''' | ||
46 | \s* # Skip leading whitespace | ||
47 | ssh:// # scheme | ||
48 | ( # Optional username/password block | ||
49 | (?P<user>\S+) # username | ||
50 | (:(?P<pass>\S+))? # colon followed by the password (optional) | ||
51 | )? | ||
52 | (?P<cparam>(;[^;]+)*)? # connection parameters block (optional) | ||
53 | @ | ||
54 | (?P<host>\S+?) # non-greedy match of the host | ||
55 | (:(?P<port>[0-9]+))? # colon followed by the port (optional) | ||
56 | / | ||
57 | (?P<path>[^;]+) # path on the remote system, may be absolute or relative, | ||
58 | # and may include the use of '~' to reference the remote home | ||
59 | # directory | ||
60 | (?P<sparam>(;[^;]+)*)? # parameters block (optional) | ||
61 | $ | ||
62 | ''', re.VERBOSE) | ||
63 | |||
64 | class SSH(Fetch): | ||
65 | '''Class to fetch a module or modules via Secure Shell''' | ||
66 | |||
67 | def supports(self, url, urldata, d): | ||
68 | return __pattern__.match(url) != None | ||
69 | |||
70 | def localpath(self, url, urldata, d): | ||
71 | m = __pattern__.match(url) | ||
72 | path = m.group('path') | ||
73 | host = m.group('host') | ||
74 | lpath = os.path.join(data.getVar('DL_DIR', d, True), host, os.path.basename(path)) | ||
75 | return lpath | ||
76 | |||
77 | def go(self, url, urldata, d): | ||
78 | dldir = data.getVar('DL_DIR', d, 1) | ||
79 | |||
80 | m = __pattern__.match(url) | ||
81 | path = m.group('path') | ||
82 | host = m.group('host') | ||
83 | port = m.group('port') | ||
84 | user = m.group('user') | ||
85 | password = m.group('pass') | ||
86 | |||
87 | ldir = os.path.join(dldir, host) | ||
88 | lpath = os.path.join(ldir, os.path.basename(path)) | ||
89 | |||
90 | if not os.path.exists(ldir): | ||
91 | os.makedirs(ldir) | ||
92 | |||
93 | if port: | ||
94 | port = '-P %s' % port | ||
95 | else: | ||
96 | port = '' | ||
97 | |||
98 | if user: | ||
99 | fr = user | ||
100 | if password: | ||
101 | fr += ':%s' % password | ||
102 | fr += '@%s' % host | ||
103 | else: | ||
104 | fr = host | ||
105 | fr += ':%s' % path | ||
106 | |||
107 | |||
108 | import commands | ||
109 | cmd = 'scp -B -r %s %s %s/' % ( | ||
110 | port, | ||
111 | commands.mkarg(fr), | ||
112 | commands.mkarg(ldir) | ||
113 | ) | ||
114 | |||
115 | (exitstatus, output) = commands.getstatusoutput(cmd) | ||
116 | if exitstatus != 0: | ||
117 | print output | ||
118 | raise FetchError('Unable to fetch %s' % url) | ||
diff --git a/bitbake-dev/lib/bb/fetch/svk.py b/bitbake-dev/lib/bb/fetch/svk.py deleted file mode 100644 index 120dad9d4e..0000000000 --- a/bitbake-dev/lib/bb/fetch/svk.py +++ /dev/null | |||
@@ -1,109 +0,0 @@ | |||
1 | # ex:ts=4:sw=4:sts=4:et | ||
2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
3 | """ | ||
4 | BitBake 'Fetch' implementations | ||
5 | |||
6 | This implementation is for svk. It is based on the svn implementation | ||
7 | |||
8 | """ | ||
9 | |||
10 | # Copyright (C) 2006 Holger Hans Peter Freyther | ||
11 | # Copyright (C) 2003, 2004 Chris Larson | ||
12 | # | ||
13 | # This program is free software; you can redistribute it and/or modify | ||
14 | # it under the terms of the GNU General Public License version 2 as | ||
15 | # published by the Free Software Foundation. | ||
16 | # | ||
17 | # This program is distributed in the hope that it will be useful, | ||
18 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
19 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
20 | # GNU General Public License for more details. | ||
21 | # | ||
22 | # You should have received a copy of the GNU General Public License along | ||
23 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
24 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
25 | # | ||
26 | # Based on functions from the base bb module, Copyright 2003 Holger Schurig | ||
27 | |||
28 | import os | ||
29 | import bb | ||
30 | from bb import data | ||
31 | from bb.fetch import Fetch | ||
32 | from bb.fetch import FetchError | ||
33 | from bb.fetch import MissingParameterError | ||
34 | |||
class Svk(Fetch):
    """Class to fetch a module or modules from svk repositories"""

    def supports(self, url, ud, d):
        """
        Check to see if a given url can be fetched with svk.
        """
        return ud.type in ['svk']

    def localpath(self, url, ud, d):
        """Record module/revision on ud and return the local tarball path."""
        if not "module" in ud.parm:
            raise MissingParameterError("svk method needs a 'module' parameter")
        else:
            ud.module = ud.parm["module"]

        ud.revision = ""
        if 'rev' in ud.parm:
            ud.revision = ud.parm['rev']

        ud.localfile = data.expand('%s_%s_%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.path.replace('/', '.'), ud.revision, ud.date), d)

        return os.path.join(data.getVar("DL_DIR", d, True), ud.localfile)

    def forcefetch(self, url, ud, d):
        """Always refetch when the date is 'now'."""
        return ud.date == "now"

    def go(self, loc, ud, d):
        """Fetch urls"""

        # a stashed tarball is good enough unless a refetch is forced
        if not self.forcefetch(loc, ud, d) and Fetch.try_mirror(d, ud.localfile):
            return

        svkroot = ud.host + ud.path

        # check out by explicit revision when given, otherwise by date
        if ud.revision:
            svkcmd = "svk co -r %s %s/%s" % (ud.revision, svkroot, ud.module)
        else:
            svkcmd = "svk co -r {%s} %s/%s" % (ud.date, svkroot, ud.module)

        # create temp directory
        localdata = data.createCopy(d)
        data.update_data(localdata)
        bb.msg.debug(2, bb.msg.domain.Fetcher, "Fetch: creating temporary directory")
        bb.mkdirhier(data.expand('${WORKDIR}', localdata))
        data.setVar('TMPBASE', data.expand('${WORKDIR}/oesvk.XXXXXX', localdata), localdata)
        tmppipe = os.popen(data.getVar('MKTEMPDIRCMD', localdata, 1) or "false")
        tmpfile = tmppipe.readline().strip()
        if not tmpfile:
            bb.msg.error(bb.msg.domain.Fetcher, "Fetch: unable to create temporary directory.. make sure 'mktemp' is in the PATH.")
            raise FetchError(ud.module)

        # check out sources there
        os.chdir(tmpfile)
        bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc)
        bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % svkcmd)
        if os.system(svkcmd) != 0:
            try:
                os.rmdir(tmpfile)
            except OSError:
                pass
            raise FetchError(ud.module)

        os.chdir(os.path.join(tmpfile, os.path.dirname(ud.module)))
        # tar them up to a defined filename
        if os.system("tar -czf %s %s" % (ud.localpath, os.path.basename(ud.module))) != 0:
            try:
                os.unlink(ud.localpath)
            except OSError:
                pass
            raise FetchError(ud.module)
        # cleanup
        os.system('rm -rf %s' % tmpfile)
diff --git a/bitbake-dev/lib/bb/fetch/svn.py b/bitbake-dev/lib/bb/fetch/svn.py deleted file mode 100644 index eef9862a84..0000000000 --- a/bitbake-dev/lib/bb/fetch/svn.py +++ /dev/null | |||
@@ -1,206 +0,0 @@ | |||
1 | # ex:ts=4:sw=4:sts=4:et | ||
2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
3 | """ | ||
4 | BitBake 'Fetch' implementation for svn. | ||
5 | |||
6 | """ | ||
7 | |||
8 | # Copyright (C) 2003, 2004 Chris Larson | ||
9 | # Copyright (C) 2004 Marcin Juszkiewicz | ||
10 | # | ||
11 | # This program is free software; you can redistribute it and/or modify | ||
12 | # it under the terms of the GNU General Public License version 2 as | ||
13 | # published by the Free Software Foundation. | ||
14 | # | ||
15 | # This program is distributed in the hope that it will be useful, | ||
16 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
18 | # GNU General Public License for more details. | ||
19 | # | ||
20 | # You should have received a copy of the GNU General Public License along | ||
21 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
22 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
23 | # | ||
24 | # Based on functions from the base bb module, Copyright 2003 Holger Schurig | ||
25 | |||
26 | import os | ||
27 | import sys | ||
28 | import bb | ||
29 | from bb import data | ||
30 | from bb.fetch import Fetch | ||
31 | from bb.fetch import FetchError | ||
32 | from bb.fetch import MissingParameterError | ||
33 | from bb.fetch import runfetchcmd | ||
34 | |||
35 | class Svn(Fetch): | ||
36 | """Class to fetch a module or modules from svn repositories""" | ||
37 | def supports(self, url, ud, d): | ||
38 | """ | ||
39 | Check to see if a given url can be fetched with svn. | ||
40 | """ | ||
41 | return ud.type in ['svn'] | ||
42 | |||
43 | def localpath(self, url, ud, d): | ||
44 | if not "module" in ud.parm: | ||
45 | raise MissingParameterError("svn method needs a 'module' parameter") | ||
46 | |||
47 | ud.module = ud.parm["module"] | ||
48 | |||
49 | # Create paths to svn checkouts | ||
50 | relpath = ud.path | ||
51 | if relpath.startswith('/'): | ||
52 | # Remove leading slash as os.path.join can't cope | ||
53 | relpath = relpath[1:] | ||
54 | ud.pkgdir = os.path.join(data.expand('${SVNDIR}', d), ud.host, relpath) | ||
55 | ud.moddir = os.path.join(ud.pkgdir, ud.module) | ||
56 | |||
57 | if 'rev' in ud.parm: | ||
58 | ud.date = "" | ||
59 | ud.revision = ud.parm['rev'] | ||
60 | elif 'date' in ud.date: | ||
61 | ud.date = ud.parm['date'] | ||
62 | ud.revision = "" | ||
63 | else: | ||
64 | # | ||
65 | # ***Nasty hack*** | ||
66 | # If DATE in unexpanded PV, use ud.date (which is set from SRCDATE) | ||
67 | # Should warn people to switch to SRCREV here | ||
68 | # | ||
69 | pv = data.getVar("PV", d, 0) | ||
70 | if "DATE" in pv: | ||
71 | ud.revision = "" | ||
72 | else: | ||
73 | rev = Fetch.srcrev_internal_helper(ud, d) | ||
74 | if rev is True: | ||
75 | ud.revision = self.latest_revision(url, ud, d) | ||
76 | ud.date = "" | ||
77 | elif rev: | ||
78 | ud.revision = rev | ||
79 | ud.date = "" | ||
80 | else: | ||
81 | ud.revision = "" | ||
82 | |||
83 | ud.localfile = data.expand('%s_%s_%s_%s_%s.tar.gz' % (ud.module.replace('/', '.'), ud.host, ud.path.replace('/', '.'), ud.revision, ud.date), d) | ||
84 | |||
85 | return os.path.join(data.getVar("DL_DIR", d, True), ud.localfile) | ||
86 | |||
87 | def _buildsvncommand(self, ud, d, command): | ||
88 | """ | ||
89 | Build up an svn commandline based on ud | ||
90 | command is "fetch", "update", "info" | ||
91 | """ | ||
92 | |||
93 | basecmd = data.expand('${FETCHCMD_svn}', d) | ||
94 | |||
95 | proto = "svn" | ||
96 | if "proto" in ud.parm: | ||
97 | proto = ud.parm["proto"] | ||
98 | |||
99 | svn_rsh = None | ||
100 | if proto == "svn+ssh" and "rsh" in ud.parm: | ||
101 | svn_rsh = ud.parm["rsh"] | ||
102 | |||
103 | svnroot = ud.host + ud.path | ||
104 | |||
105 | # either use the revision, or SRCDATE in braces, | ||
106 | options = [] | ||
107 | |||
108 | if ud.user: | ||
109 | options.append("--username %s" % ud.user) | ||
110 | |||
111 | if ud.pswd: | ||
112 | options.append("--password %s" % ud.pswd) | ||
113 | |||
114 | if command is "info": | ||
115 | svncmd = "%s info %s %s://%s/%s/" % (basecmd, " ".join(options), proto, svnroot, ud.module) | ||
116 | else: | ||
117 | suffix = "" | ||
118 | if ud.revision: | ||
119 | options.append("-r %s" % ud.revision) | ||
120 | suffix = "@%s" % (ud.revision) | ||
121 | elif ud.date: | ||
122 | options.append("-r {%s}" % ud.date) | ||
123 | |||
124 | if command is "fetch": | ||
125 | svncmd = "%s co %s %s://%s/%s%s %s" % (basecmd, " ".join(options), proto, svnroot, ud.module, suffix, ud.module) | ||
126 | elif command is "update": | ||
127 | svncmd = "%s update %s" % (basecmd, " ".join(options)) | ||
128 | else: | ||
129 | raise FetchError("Invalid svn command %s" % command) | ||
130 | |||
131 | if svn_rsh: | ||
132 | svncmd = "svn_RSH=\"%s\" %s" % (svn_rsh, svncmd) | ||
133 | |||
134 | return svncmd | ||
135 | |||
136 | def go(self, loc, ud, d): | ||
137 | """Fetch url""" | ||
138 | |||
139 | # try to use the tarball stash | ||
140 | if Fetch.try_mirror(d, ud.localfile): | ||
141 | bb.msg.debug(1, bb.msg.domain.Fetcher, "%s already exists or was mirrored, skipping svn checkout." % ud.localpath) | ||
142 | return | ||
143 | |||
144 | bb.msg.debug(2, bb.msg.domain.Fetcher, "Fetch: checking for module directory '" + ud.moddir + "'") | ||
145 | |||
146 | if os.access(os.path.join(ud.moddir, '.svn'), os.R_OK): | ||
147 | svnupdatecmd = self._buildsvncommand(ud, d, "update") | ||
148 | bb.msg.note(1, bb.msg.domain.Fetcher, "Update " + loc) | ||
149 | # update sources there | ||
150 | os.chdir(ud.moddir) | ||
151 | bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % svnupdatecmd) | ||
152 | runfetchcmd(svnupdatecmd, d) | ||
153 | else: | ||
154 | svnfetchcmd = self._buildsvncommand(ud, d, "fetch") | ||
155 | bb.msg.note(1, bb.msg.domain.Fetcher, "Fetch " + loc) | ||
156 | # check out sources there | ||
157 | bb.mkdirhier(ud.pkgdir) | ||
158 | os.chdir(ud.pkgdir) | ||
159 | bb.msg.debug(1, bb.msg.domain.Fetcher, "Running %s" % svnfetchcmd) | ||
160 | runfetchcmd(svnfetchcmd, d) | ||
161 | |||
162 | os.chdir(ud.pkgdir) | ||
163 | # tar them up to a defined filename | ||
164 | try: | ||
165 | runfetchcmd("tar -czf %s %s" % (ud.localpath, ud.module), d) | ||
166 | except: | ||
167 | t, v, tb = sys.exc_info() | ||
168 | try: | ||
169 | os.unlink(ud.localpath) | ||
170 | except OSError: | ||
171 | pass | ||
172 | raise t, v, tb | ||
173 | |||
174 | def suppports_srcrev(self): | ||
175 | return True | ||
176 | |||
177 | def _revision_key(self, url, ud, d): | ||
178 | """ | ||
179 | Return a unique key for the url | ||
180 | """ | ||
181 | return "svn:" + ud.moddir | ||
182 | |||
183 | def _latest_revision(self, url, ud, d): | ||
184 | """ | ||
185 | Return the latest upstream revision number | ||
186 | """ | ||
187 | bb.msg.debug(2, bb.msg.domain.Fetcher, "SVN fetcher hitting network for %s" % url) | ||
188 | |||
189 | output = runfetchcmd("LANG=C LC_ALL=C " + self._buildsvncommand(ud, d, "info"), d, True) | ||
190 | |||
191 | revision = None | ||
192 | for line in output.splitlines(): | ||
193 | if "Last Changed Rev" in line: | ||
194 | revision = line.split(":")[1].strip() | ||
195 | |||
196 | return revision | ||
197 | |||
198 | def _sortable_revision(self, url, ud, d): | ||
199 | """ | ||
200 | Return a sortable revision number which in our case is the revision number | ||
201 | """ | ||
202 | |||
203 | return self._build_revision(url, ud, d) | ||
204 | |||
205 | def _build_revision(self, url, ud, d): | ||
206 | return ud.revision | ||
diff --git a/bitbake-dev/lib/bb/fetch/wget.py b/bitbake-dev/lib/bb/fetch/wget.py deleted file mode 100644 index fd93c7ec46..0000000000 --- a/bitbake-dev/lib/bb/fetch/wget.py +++ /dev/null | |||
@@ -1,130 +0,0 @@ | |||
1 | # ex:ts=4:sw=4:sts=4:et | ||
2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
3 | """ | ||
4 | BitBake 'Fetch' implementations | ||
5 | |||
6 | Classes for obtaining upstream sources for the | ||
7 | BitBake build tools. | ||
8 | |||
9 | """ | ||
10 | |||
11 | # Copyright (C) 2003, 2004 Chris Larson | ||
12 | # | ||
13 | # This program is free software; you can redistribute it and/or modify | ||
14 | # it under the terms of the GNU General Public License version 2 as | ||
15 | # published by the Free Software Foundation. | ||
16 | # | ||
17 | # This program is distributed in the hope that it will be useful, | ||
18 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
19 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
20 | # GNU General Public License for more details. | ||
21 | # | ||
22 | # You should have received a copy of the GNU General Public License along | ||
23 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
24 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
25 | # | ||
26 | # Based on functions from the base bb module, Copyright 2003 Holger Schurig | ||
27 | |||
28 | import os | ||
29 | import bb | ||
30 | from bb import data | ||
31 | from bb.fetch import Fetch | ||
32 | from bb.fetch import FetchError | ||
33 | from bb.fetch import uri_replace | ||
34 | |||
class Wget(Fetch):
    """Class to fetch urls via 'wget'"""
    def supports(self, url, ud, d):
        """
        Check to see if a given url can be fetched with wget.
        """
        return ud.type in ['http','https','ftp']

    def localpath(self, url, ud, d):
        # Re-encode the url without its parameters; the local file name is
        # the basename of the path component (assumes the path names a file).
        url = bb.encodeurl([ud.type, ud.host, ud.path, ud.user, ud.pswd, {}])
        ud.basename = os.path.basename(ud.path)
        ud.localfile = data.expand(os.path.basename(url), d)

        return os.path.join(data.getVar("DL_DIR", d, True), ud.localfile)

    def go(self, uri, ud, d, checkonly = False):
        """Fetch urls"""

        def fetch_uri(uri, ud, d):
            # Choose the shell command template: a status-only probe, a
            # resume of a partially-downloaded file, or a fresh fetch.
            if checkonly:
                fetchcmd = data.getVar("CHECKCOMMAND", d, 1)
            elif os.path.exists(ud.localpath):
                # file exists, but we didnt complete it.. trying again..
                fetchcmd = data.getVar("RESUMECOMMAND", d, 1)
            else:
                fetchcmd = data.getVar("FETCHCOMMAND", d, 1)

            uri = uri.split(";")[0]
            uri_decoded = list(bb.decodeurl(uri))
            uri_type = uri_decoded[0]
            uri_host = uri_decoded[1]

            bb.msg.note(1, bb.msg.domain.Fetcher, "fetch " + uri)
            # Substitute the template placeholders with the real uri/file.
            fetchcmd = fetchcmd.replace("${URI}", uri.split(";")[0])
            fetchcmd = fetchcmd.replace("${FILE}", ud.basename)
            # Work out which proxy (if any) applies to this host.
            httpproxy = None
            ftpproxy = None
            if uri_type == 'http':
                httpproxy = data.getVar("HTTP_PROXY", d, True)
                httpproxy_ignore = (data.getVar("HTTP_PROXY_IGNORE", d, True) or "").split()
                for p in httpproxy_ignore:
                    if uri_host.endswith(p):
                        httpproxy = None
                        break
            if uri_type == 'ftp':
                ftpproxy = data.getVar("FTP_PROXY", d, True)
                # NOTE(review): the ftp ignore list is read from
                # HTTP_PROXY_IGNORE rather than FTP_PROXY_IGNORE - looks
                # like a copy/paste slip; confirm which variable is intended.
                ftpproxy_ignore = (data.getVar("HTTP_PROXY_IGNORE", d, True) or "").split()
                for p in ftpproxy_ignore:
                    if uri_host.endswith(p):
                        ftpproxy = None
                        break
            # Prepend the proxy settings as environment assignments.
            if httpproxy:
                fetchcmd = "http_proxy=" + httpproxy + " " + fetchcmd
            if ftpproxy:
                fetchcmd = "ftp_proxy=" + ftpproxy + " " + fetchcmd
            bb.msg.debug(2, bb.msg.domain.Fetcher, "executing " + fetchcmd)
            ret = os.system(fetchcmd)
            if ret != 0:
                return False

            # Sanity check since wget can pretend it succeed when it didn't
            # Also, this used to happen if sourceforge sent us to the mirror page
            if not os.path.exists(ud.localpath) and not checkonly:
                bb.msg.debug(2, bb.msg.domain.Fetcher, "The fetch command for %s returned success but %s doesn't exist?..." % (uri, ud.localpath))
                return False

            return True

        # Run under a copy of the datastore with a wget-specific OVERRIDES
        # prefix so wget_-conditional variables take effect.
        localdata = data.createCopy(d)
        data.setVar('OVERRIDES', "wget:" + data.getVar('OVERRIDES', localdata), localdata)
        data.update_data(localdata)

        # Try premirrors first, then the original uri, then mirrors.
        premirrors = [ i.split() for i in (data.getVar('PREMIRRORS', localdata, 1) or "").split('\n') if i ]
        for (find, replace) in premirrors:
            newuri = uri_replace(uri, find, replace, d)
            if newuri != uri:
                if fetch_uri(newuri, ud, localdata):
                    return True

        if fetch_uri(uri, ud, localdata):
            return True

        # try mirrors
        mirrors = [ i.split() for i in (data.getVar('MIRRORS', localdata, 1) or "").split('\n') if i ]
        for (find, replace) in mirrors:
            newuri = uri_replace(uri, find, replace, d)
            if newuri != uri:
                if fetch_uri(newuri, ud, localdata):
                    return True

        raise FetchError(uri)


    def checkstatus(self, uri, ud, d):
        """Probe uri availability without downloading (go with checkonly)."""
        return self.go(uri, ud, d, True)
diff --git a/bitbake-dev/lib/bb/manifest.py b/bitbake-dev/lib/bb/manifest.py deleted file mode 100644 index 4e4b7d98ec..0000000000 --- a/bitbake-dev/lib/bb/manifest.py +++ /dev/null | |||
@@ -1,144 +0,0 @@ | |||
1 | # ex:ts=4:sw=4:sts=4:et | ||
2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
3 | # | ||
4 | # Copyright (C) 2003, 2004 Chris Larson | ||
5 | # | ||
6 | # This program is free software; you can redistribute it and/or modify | ||
7 | # it under the terms of the GNU General Public License version 2 as | ||
8 | # published by the Free Software Foundation. | ||
9 | # | ||
10 | # This program is distributed in the hope that it will be useful, | ||
11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | # GNU General Public License for more details. | ||
14 | # | ||
15 | # You should have received a copy of the GNU General Public License along | ||
16 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
17 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
19 | import os, sys | ||
20 | import bb, bb.data | ||
21 | |||
def getfields(line):
    """Split one whitespace-separated manifest line into a field dict.

    Returns None for an empty line or a line with no tokens. Fields whose
    token is the '-' placeholder, and fields with no token at all (short
    lines), are left as None. Extra trailing tokens are ignored.
    """
    fieldmap = ("pkg", "src", "dest", "type", "mode", "uid", "gid",
                "major", "minor", "start", "inc", "count")
    fields = dict.fromkeys(fieldmap)

    if not line:
        return None

    splitline = line.split()
    if not splitline:
        return None

    # zip() stops at the shorter sequence, which replaces the original
    # index loop guarded by an IndexError catch.
    for name, value in zip(fieldmap, splitline):
        if value != '-':
            fields[name] = value
    return fields
43 | |||
def parse (mfile, d):
    """Read a manifest file object, returning a list of field dicts.

    Comment lines (leading '#') and lines getfields() rejects are dropped.
    """
    manifest = []
    # readline() returns "" at EOF, which terminates the iterator.
    for line in iter(mfile.readline, ""):
        if line.startswith("#"):
            continue
        fields = getfields(line)
        if fields:
            manifest.append(fields)
    return manifest
57 | |||
def emit (func, manifest, d):
    """Render the manifest entries for task 'func' as newline-terminated
    shell commands, skipping entries emit_line() rejects."""
    rendered = []
    for entry in manifest:
        cmd = emit_line(func, entry, d)
        if cmd:
            rendered.append(cmd + "\n")
    return "".join(rendered)
68 | |||
def mangle (func, line, d):
    """Rewrite a manifest entry's src/dest paths for the given task.

    Returns a shallow-copied dict with the adjusted paths, or None when
    the entry does not apply (unknown func, no dest, or a do_stage dest
    outside the known staging locations).
    """
    import copy
    newline = copy.copy(line)
    src = bb.data.expand(newline["src"], d)

    # Relative sources live under the work directory.
    if src:
        if not os.path.isabs(src):
            src = "${WORKDIR}/" + src

    dest = newline["dest"]
    if not dest:
        return None

    if dest.startswith("/"):
        dest = dest[1:]

    # Bugfix: these comparisons used 'is' (object identity) against string
    # literals, which only worked via CPython's interning of short strings;
    # use equality instead.
    if func == "do_install":
        dest = "${D}/" + dest

    elif func == "do_populate":
        dest = "${WORKDIR}/install/" + newline["pkg"] + "/" + dest

    elif func == "do_stage":
        # Map standard install locations into the staging area; anything
        # that doesn't start with a known prefix can't be staged.
        varmap = {}
        varmap["${bindir}"] = "${STAGING_DIR}/${HOST_SYS}/bin"
        varmap["${libdir}"] = "${STAGING_DIR}/${HOST_SYS}/lib"
        varmap["${includedir}"] = "${STAGING_DIR}/${HOST_SYS}/include"
        varmap["${datadir}"] = "${STAGING_DATADIR}"

        matched = 0
        for key in varmap.keys():
            if dest.startswith(key):
                dest = varmap[key] + "/" + dest[len(key):]
                matched = 1
        if not matched:
            return None
    else:
        return None

    newline["src"] = src
    newline["dest"] = dest
    return newline
113 | |||
def emit_line (func, line, d):
    """Render one manifest entry as an 'install' shell command for task
    'func', or return None when the entry does not apply.

    Directory entries ("d") become 'install -d'; file entries ("f")
    become 'install -D' (or a mkdir + install pair when dest ends in '/').
    """
    import copy
    newline = mangle(func, copy.deepcopy(line), d)
    if not newline:
        return None

    # Renamed from 'str'/'type' to stop shadowing the builtins.
    cmd = ""
    entrytype = newline["type"]
    mode = newline["mode"]
    src = newline["src"]
    dest = newline["dest"]
    # Bugfix: 'is' identity tests against "d"/"f" replaced with equality;
    # identity only held because CPython caches 1-char strings.
    if entrytype == "d":
        cmd = "install -d "
        if mode:
            cmd += "-m %s " % mode
        cmd += dest
    elif entrytype == "f":
        if not src:
            return None
        if dest.endswith("/"):
            # Trailing slash: create the directory, then install into it.
            cmd = "install -d "
            cmd += dest + "\n"
            cmd += "install "
        else:
            cmd = "install -D "
        if mode:
            cmd += "-m %s " % mode
        cmd += src + " " + dest
    return cmd
diff --git a/bitbake-dev/lib/bb/methodpool.py b/bitbake-dev/lib/bb/methodpool.py deleted file mode 100644 index f43c4a0580..0000000000 --- a/bitbake-dev/lib/bb/methodpool.py +++ /dev/null | |||
@@ -1,84 +0,0 @@ | |||
1 | # ex:ts=4:sw=4:sts=4:et | ||
2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
3 | # | ||
4 | # | ||
5 | # Copyright (C) 2006 Holger Hans Peter Freyther | ||
6 | # | ||
7 | # This program is free software; you can redistribute it and/or modify | ||
8 | # it under the terms of the GNU General Public License version 2 as | ||
9 | # published by the Free Software Foundation. | ||
10 | # | ||
11 | # This program is distributed in the hope that it will be useful, | ||
12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | # GNU General Public License for more details. | ||
15 | # | ||
16 | # You should have received a copy of the GNU General Public License along | ||
17 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
18 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
19 | |||
20 | |||
21 | """ | ||
22 | What is a method pool? | ||
23 | |||
24 | BitBake has a global method scope where .bb, .inc and .bbclass | ||
25 | files can install methods. These methods are parsed from strings. | ||
26 | To avoid recompiling and executing these string we introduce | ||
27 | a method pool to do this task. | ||
28 | |||
29 | This pool will be used to compile and execute the functions. It | ||
30 | will be smart enough to | ||
31 | """ | ||
32 | |||
33 | from bb.utils import better_compile, better_exec | ||
34 | from bb import error | ||
35 | |||
36 | # A dict of modules we have handled | ||
37 | # it is the number of .bbclasses + x in size | ||
38 | _parsed_methods = { } | ||
39 | _parsed_fns = { } | ||
40 | |||
def insert_method(modulename, code, fn):
    """
    Add code of a module should be added. The methods
    will be simply added, no checking will be done
    """
    # Compile the code string once, then execute it so the functions it
    # defines land in the shared global method scope.
    comp = better_compile(code, "<bb>", fn )
    better_exec(comp, __builtins__, code, fn)

    # now some instrumentation
    # co_names lists every global name the compiled code references,
    # which includes the functions it defines; record where each was seen
    # and complain when two different modules define the same name.
    code = comp.co_names
    for name in code:
        if name in ['None', 'False']:
            continue
        elif name in _parsed_fns and not _parsed_fns[name] == modulename:
            error( "Error Method already seen: %s in' %s' now in '%s'" % (name, _parsed_fns[name], modulename))
        else:
            _parsed_fns[name] = modulename
58 | |||
def check_insert_method(modulename, code, fn):
    """
    Add the code if it wasnt added before. The module
    name will be used for that

    Variables:
    @modulename a short name e.g. base.bbclass
    @code The actual python code
    @fn The filename from the outer file
    """
    # Bugfix: previously the module was only recorded in _parsed_methods
    # *after* the not-in check had failed, i.e. never on the inserting
    # path, so every call re-compiled and re-executed the code. Record it
    # before delegating to insert_method so the memoization works.
    if modulename not in _parsed_methods:
        _parsed_methods[modulename] = 1
        return insert_method(modulename, code, fn)
72 | |||
def parsed_module(modulename):
    """Return whether the named module has already been parsed."""
    return modulename in get_parsed_dict()
78 | |||
79 | |||
def get_parsed_dict():
    """
    shortcut
    """
    # Direct accessor for the module-name -> parsed-flag mapping.
    return _parsed_methods
diff --git a/bitbake-dev/lib/bb/msg.py b/bitbake-dev/lib/bb/msg.py deleted file mode 100644 index 3fcf7091be..0000000000 --- a/bitbake-dev/lib/bb/msg.py +++ /dev/null | |||
@@ -1,125 +0,0 @@ | |||
1 | # ex:ts=4:sw=4:sts=4:et | ||
2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
3 | """ | ||
4 | BitBake 'msg' implementation | ||
5 | |||
6 | Message handling infrastructure for bitbake | ||
7 | |||
8 | """ | ||
9 | |||
10 | # Copyright (C) 2006 Richard Purdie | ||
11 | # | ||
12 | # This program is free software; you can redistribute it and/or modify | ||
13 | # it under the terms of the GNU General Public License version 2 as | ||
14 | # published by the Free Software Foundation. | ||
15 | # | ||
16 | # This program is distributed in the hope that it will be useful, | ||
17 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
19 | # GNU General Public License for more details. | ||
20 | # | ||
21 | # You should have received a copy of the GNU General Public License along | ||
22 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
23 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
24 | |||
25 | import sys, bb | ||
26 | from bb import event | ||
27 | |||
28 | debug_level = {} | ||
29 | |||
30 | verbose = False | ||
31 | |||
32 | domain = bb.utils.Enum( | ||
33 | 'Build', | ||
34 | 'Cache', | ||
35 | 'Collection', | ||
36 | 'Data', | ||
37 | 'Depends', | ||
38 | 'Fetcher', | ||
39 | 'Parsing', | ||
40 | 'PersistData', | ||
41 | 'Provider', | ||
42 | 'RunQueue', | ||
43 | 'TaskData', | ||
44 | 'Util') | ||
45 | |||
46 | |||
class MsgBase(bb.event.Event):
    """Base class for messages"""

    def __init__(self, msg):
        # The message text payload, delivered to handlers via the event queue.
        self._message = msg
        event.Event.__init__(self)
53 | |||
class MsgDebug(MsgBase):
    """Debug Message event (emitted by debug() below)."""
56 | |||
class MsgNote(MsgBase):
    """Note Message event (emitted by note() below)."""
59 | |||
class MsgWarn(MsgBase):
    """Warning Message event (emitted by warn() below)."""
62 | |||
class MsgError(MsgBase):
    """Error Message event (emitted by error() below)."""
65 | |||
class MsgFatal(MsgBase):
    """Fatal Message event (emitted by fatal(), which then exits)."""
68 | |||
class MsgPlain(MsgBase):
    """General output event with no severity prefix."""
71 | |||
72 | # | ||
73 | # Message control functions | ||
74 | # | ||
75 | |||
def set_debug_level(level):
    """Reset every message domain, plus the 'default' entry, to the
    given debug level."""
    levels = {}
    for dom in bb.msg.domain:
        levels[dom] = level
    levels['default'] = level
    bb.msg.debug_level = levels
81 | |||
def set_verbose(level):
    # When truthy, note() fires regardless of the message's level.
    bb.msg.verbose = level
84 | |||
def set_debug_domains(domains):
    """Raise the debug level by one for each named domain; warn about
    names that match no known domain."""
    for name in domains:
        matches = [dom for dom in bb.msg.domain if name == str(dom)]
        for dom in matches:
            bb.msg.debug_level[dom] = bb.msg.debug_level[dom] + 1
        if not matches:
            bb.msg.warn(None, "Logging domain %s is not valid, ignoring" % name)
94 | |||
95 | # | ||
96 | # Message handling functions | ||
97 | # | ||
98 | |||
def debug(level, domain, msg, fn = None):
    """Fire a MsgDebug event when the domain's debug level is at least
    'level'; a falsy domain falls back to the 'default' entry."""
    key = domain or 'default'
    if debug_level[key] >= level:
        bb.event.fire(MsgDebug(msg), None)
104 | |||
def note(level, domain, msg, fn = None):
    """Fire a MsgNote event: level-1 notes always fire, higher levels
    need verbose mode or a raised debug level for the domain."""
    key = domain or 'default'
    if level == 1 or verbose or debug_level[key] >= 1:
        bb.event.fire(MsgNote(msg), None)
110 | |||
def warn(domain, msg, fn = None):
    # Fire a warning event; domain and fn are currently unused here.
    bb.event.fire(MsgWarn(msg), None)
113 | |||
def error(domain, msg, fn = None):
    # Fire an error event and also echo the message to stdout.
    bb.event.fire(MsgError(msg), None)
    print 'ERROR: ' + msg
117 | |||
def fatal(domain, msg, fn = None):
    # Fire a fatal event, echo the message, then terminate the process.
    bb.event.fire(MsgFatal(msg), None)
    print 'FATAL: ' + msg
    sys.exit(1)
122 | |||
def plain(msg, fn = None):
    # Fire a plain-output event (no severity prefix, no level filtering).
    bb.event.fire(MsgPlain(msg), None)
125 | |||
diff --git a/bitbake-dev/lib/bb/parse/__init__.py b/bitbake-dev/lib/bb/parse/__init__.py deleted file mode 100644 index 5dd96c4136..0000000000 --- a/bitbake-dev/lib/bb/parse/__init__.py +++ /dev/null | |||
@@ -1,84 +0,0 @@ | |||
1 | """ | ||
2 | BitBake Parsers | ||
3 | |||
4 | File parsers for the BitBake build tools. | ||
5 | |||
6 | """ | ||
7 | |||
8 | |||
9 | # Copyright (C) 2003, 2004 Chris Larson | ||
10 | # Copyright (C) 2003, 2004 Phil Blundell | ||
11 | # | ||
12 | # This program is free software; you can redistribute it and/or modify | ||
13 | # it under the terms of the GNU General Public License version 2 as | ||
14 | # published by the Free Software Foundation. | ||
15 | # | ||
16 | # This program is distributed in the hope that it will be useful, | ||
17 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
19 | # GNU General Public License for more details. | ||
20 | # | ||
21 | # You should have received a copy of the GNU General Public License along | ||
22 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
23 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
24 | # | ||
25 | # Based on functions from the base bb module, Copyright 2003 Holger Schurig | ||
26 | |||
27 | __all__ = [ 'ParseError', 'SkipPackage', 'cached_mtime', 'mark_dependency', | ||
28 | 'supports', 'handle', 'init' ] | ||
29 | handlers = [] | ||
30 | |||
31 | import bb, os | ||
32 | |||
class ParseError(Exception):
    """Exception raised when parsing fails"""
35 | |||
class SkipPackage(Exception):
    """Exception raised to skip this package (caught by the parse driver)."""
38 | |||
39 | __mtime_cache = {} | ||
def cached_mtime(f):
    """Return f's mtime, stat()ing each path at most once (cached)."""
    if f not in __mtime_cache:
        __mtime_cache[f] = os.stat(f)[8]
    return __mtime_cache[f]
44 | |||
def cached_mtime_noerror(f):
    """Like cached_mtime, but return 0 (without caching) when stat fails."""
    if f not in __mtime_cache:
        try:
            __mtime_cache[f] = os.stat(f)[8]
        except OSError:
            return 0
    return __mtime_cache[f]
52 | |||
def update_mtime(f):
    # Force-refresh the cached mtime for f (e.g. after rewriting the file).
    __mtime_cache[f] = os.stat(f)[8]
    return __mtime_cache[f]
56 | |||
def mark_dependency(d, f):
    """Append (path, mtime) for f to the datastore's __depends list so
    cache validity can be checked against it later."""
    if f.startswith('./'):
        # Make the path absolute relative to the current directory.
        f = "%s/%s" % (os.getcwd(), f[2:])
    depends = bb.data.getVar('__depends', d) or []
    depends.append((f, cached_mtime(f)))
    bb.data.setVar('__depends', depends, d)
63 | |||
def supports(fn, data):
    """Returns true if we have a handler for this file, false otherwise"""
    for handler in handlers:
        if handler['supports'](fn, data):
            return 1
    return 0
70 | |||
def handle(fn, data, include = 0):
    """Call the handler that is appropriate for this file"""
    for handler in handlers:
        if handler['supports'](fn, data):
            return handler['handle'](fn, data, include)
    raise ParseError("%s is not a BitBake file" % fn)
77 | |||
def init(fn, data):
    # Delegate initialisation to the first handler that supports fn.
    # NOTE(review): h['supports'] is called here with one argument while
    # supports()/handle() above pass (fn, data) - confirm handlers accept
    # both call shapes.
    for h in handlers:
        if h['supports'](fn):
            return h['init'](data)
82 | |||
83 | |||
84 | from parse_py import __version__, ConfHandler, BBHandler | ||
diff --git a/bitbake-dev/lib/bb/parse/parse_py/BBHandler.py b/bitbake-dev/lib/bb/parse/parse_py/BBHandler.py deleted file mode 100644 index 86fa18ebd2..0000000000 --- a/bitbake-dev/lib/bb/parse/parse_py/BBHandler.py +++ /dev/null | |||
@@ -1,410 +0,0 @@ | |||
1 | #!/usr/bin/env python | ||
2 | # ex:ts=4:sw=4:sts=4:et | ||
3 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
4 | """ | ||
5 | class for handling .bb files | ||
6 | |||
7 | Reads a .bb file and obtains its metadata | ||
8 | |||
9 | """ | ||
10 | |||
11 | |||
12 | # Copyright (C) 2003, 2004 Chris Larson | ||
13 | # Copyright (C) 2003, 2004 Phil Blundell | ||
14 | # | ||
15 | # This program is free software; you can redistribute it and/or modify | ||
16 | # it under the terms of the GNU General Public License version 2 as | ||
17 | # published by the Free Software Foundation. | ||
18 | # | ||
19 | # This program is distributed in the hope that it will be useful, | ||
20 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
21 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
22 | # GNU General Public License for more details. | ||
23 | # | ||
24 | # You should have received a copy of the GNU General Public License along | ||
25 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
26 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
27 | |||
28 | import re, bb, os, sys, time, string | ||
29 | import bb.fetch, bb.build, bb.utils | ||
30 | from bb import data, fetch, methodpool | ||
31 | |||
32 | from ConfHandler import include, localpath, obtain, init | ||
33 | from bb.parse import ParseError | ||
34 | |||
# Shell/python function definitions: optional 'python'/'fakeroot'
# prefixes, an optional name (anonymous when absent), then '() {'.
__func_start_regexp__ = re.compile( r"(((?P<py>python)|(?P<fr>fakeroot))\s*)*(?P<func>[\w\.\-\+\{\}\$]+)?\s*\(\s*\)\s*{$" )
__inherit_regexp__ = re.compile( r"inherit\s+(.+)" )
__export_func_regexp__ = re.compile( r"EXPORT_FUNCTIONS\s+(.+)" )
# addtask <func> [before <tasks>] [after <tasks>] — before/after may
# appear in either order.
__addtask_regexp__ = re.compile("addtask\s+(?P<func>\w+)\s*((before\s*(?P<before>((.*(?=after))|(.*))))|(after\s*(?P<after>((.*(?=before))|(.*)))))*")
__addhandler_regexp__ = re.compile( r"addhandler\s+(.+)" )
__def_regexp__ = re.compile( r"def\s+(\w+).*:" )
# A line belonging to a python def body: indented, or completely blank.
__python_func_regexp__ = re.compile( r"(\s+.*)|(^$)" )
__word__ = re.compile(r"\S+")

# Mutable parser state shared between handle() and feeder().
__infunc__ = ""        # name of the shell/python function currently being read
__inpython__ = False   # True while inside a 'def ...:' python block
__body__ = []          # accumulated body lines of the current function
__classname__ = ""     # root name of the .bbclass being parsed, if any
classes = [ None, ]    # stack of nested class names (None = recipe level)

# We need to indicate EOF to the feeder. This code is so messy that
# factoring it out to a close_parse_file method is out of question.
# We will use the IN_PYTHON_EOF as an indicator to just close the method
#
# The two parts using it are tightly integrated anyway
IN_PYTHON_EOF = -9999999999999

# Shared dict of already-parsed .bbclass/.inc method bodies, so classes
# are not fed to the methodpool twice.
__parsed_methods__ = methodpool.get_parsed_dict()
58 | |||
def supports(fn, d):
    """Return True when fn (after URL-to-local resolution) looks like a
    BitBake recipe, class or include file."""
    return localpath(fn, d).endswith((".bb", ".bbclass", ".inc"))
62 | |||
def inherit(files, d):
    """Inherit each class named in 'files' into datastore d.

    Bare class names are mapped to classes/<name>.bbclass; classes already
    listed in __inherit_cache are skipped.
    """
    cache = data.getVar('__inherit_cache', d) or []
    fn = ""
    lineno = 0
    for cls_file in data.expand(files, d):
        # Bare names (not absolute, not *.bbclass) live under classes/.
        if cls_file[0] != "/" and cls_file[-8:] != ".bbclass":
            cls_file = os.path.join('classes', '%s.bbclass' % cls_file)

        if cls_file not in cache:
            bb.msg.debug(2, bb.msg.domain.Parsing, "BB %s:%d: inheriting %s" % (fn, lineno, cls_file))
            cache.append(cls_file)
            data.setVar('__inherit_cache', cache, d)
            include(fn, cls_file, d, "inherit")
            # include() can trigger nested inherits; re-read the cache.
            cache = data.getVar('__inherit_cache', d) or []
78 | |||
79 | |||
def finalise(fn, d):
    """Finish parsing recipe fn: expand keys, run queued anonymous python
    functions, register event handlers, add tasks, and fire RecipeParsed.

    fn -- recipe filename (only used for the RecipeParsed event)
    d  -- the recipe's datastore (mutated in place)
    """
    data.expandKeys(d)
    data.update_data(d)
    # Collect any queued anonymous python snippets into a single function.
    anonqueue = data.getVar("__anonqueue", d, 1) or []
    body = [x['content'] for x in anonqueue]
    flag = { 'python' : 1, 'func' : 1 }
    data.setVar("__anonfunc", "\n".join(body), d)
    data.setVarFlags("__anonfunc", flag, d)
    from bb import build
    try:
        # Run anonymous functions with a throwaway T so they cannot
        # pollute the real task directory; restore T afterwards.
        t = data.getVar('T', d)
        data.setVar('T', '${TMPDIR}/anonfunc/', d)
        anonfuncs = data.getVar('__BBANONFUNCS', d) or []
        # Build a body that invokes every registered __anon_* function.
        code = ""
        for f in anonfuncs:
            code = code + " %s(d)\n" % f
        data.setVar("__anonfunc", code, d)
        build.exec_func("__anonfunc", d)
        data.delVar('T', d)
        if t:
            data.setVar('T', t, d)
    except Exception, e:
        bb.msg.debug(1, bb.msg.domain.Parsing, "Exception when executing anonymous function: %s" % e)
        raise
    # The anonymous-function scaffolding is no longer needed.
    data.delVar("__anonqueue", d)
    data.delVar("__anonfunc", d)
    data.update_data(d)

    all_handlers = {}
    # Register every variable flagged via 'addhandler' as an event handler.
    for var in data.getVar('__BBHANDLERS', d) or []:
        # try to add the handler
        handler = data.getVar(var,d)
        bb.event.register(var, handler)

    # Turn the collected 'addtask' entries into real build tasks.
    tasklist = data.getVar('__BBTASKS', d) or []
    bb.build.add_tasks(tasklist, d)

    bb.event.fire(bb.event.RecipeParsed(fn), d)
118 | |||
119 | |||
def handle(fn, d, include = 0):
    """Parse a .bb/.bbclass/.inc file into datastore d.

    include -- non-zero when fn is being included/inherited from another
               file rather than parsed as a top-level recipe.

    Returns d, or — for a top-level recipe using BBCLASSEXTEND — a dict
    mapping each class-extension name (plus "") to a finalised datastore
    copy.
    """
    global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __infunc__, __body__, __residue__
    # Reset the feeder's per-file parser state.
    # NOTE(review): __classname__ is assigned here without appearing in the
    # global statement above, so this binding is local to handle(); the
    # module-level __classname__ is never updated — confirm intended.
    __body__ = []
    __infunc__ = ""
    __classname__ = ""
    __residue__ = []

    if include == 0:
        bb.msg.debug(2, bb.msg.domain.Parsing, "BB " + fn + ": handle(data)")
    else:
        bb.msg.debug(2, bb.msg.domain.Parsing, "BB " + fn + ": handle(data, include)")

    (root, ext) = os.path.splitext(os.path.basename(fn))
    base_name = "%s%s" % (root,ext)
    init(d)

    if ext == ".bbclass":
        # Track the class on the stack used by EXPORT_FUNCTIONS handling,
        # and remember that this class file has been inherited.
        __classname__ = root
        classes.append(__classname__)
        __inherit_cache = data.getVar('__inherit_cache', d) or []
        if not fn in __inherit_cache:
            __inherit_cache.append(fn)
            data.setVar('__inherit_cache', __inherit_cache, d)

    # Remember the including file's FILE so it can be restored afterwards.
    if include != 0:
        oldfile = data.getVar('FILE', d)
    else:
        oldfile = None

    # Resolve remote URLs to a local path, then search BBPATH for
    # relative names.
    fn = obtain(fn, d)
    bbpath = (data.getVar('BBPATH', d, 1) or '').split(':')
    if not os.path.isabs(fn):
        f = None
        for p in bbpath:
            j = os.path.join(p, fn)
            if os.access(j, os.R_OK):
                abs_fn = j
                f = open(j, 'r')
                break
        if f is None:
            raise IOError("file %s not found" % fn)
    else:
        f = open(fn,'r')
        abs_fn = fn
    # NOTE(review): f is never explicitly closed; it is only reclaimed by
    # garbage collection.

    if include:
        bb.parse.mark_dependency(d, abs_fn)

    if ext != ".bbclass":
        data.setVar('FILE', fn, d)

    # Feed the file to the line-oriented parser state machine.
    lineno = 0
    while 1:
        lineno = lineno + 1
        s = f.readline()
        if not s: break
        s = s.rstrip()
        feeder(lineno, s, fn, base_name, d)
    if __inpython__:
        # add a blank line to close out any python definition
        feeder(IN_PYTHON_EOF, "", fn, base_name, d)
    if ext == ".bbclass":
        classes.remove(__classname__)
    else:
        if include == 0:
            # Top-level recipe: finalise it, expanding BBCLASSEXTEND into
            # one datastore copy per extension class.
            multi = data.getVar('BBCLASSEXTEND', d, 1)
            if multi:
                based = bb.data.createCopy(d)
            else:
                based = d
            try:
                finalise(fn, based)
            except bb.parse.SkipPackage:
                bb.data.setVar("__SKIPPED", True, based)
            darray = {"": based}

            for cls in (multi or "").split():
                pn = data.getVar('PN', d, True)
                based = bb.data.createCopy(d)
                data.setVar('PN', pn + '-' + cls, based)
                inherit([cls], based)
                try:
                    finalise(fn, based)
                except bb.parse.SkipPackage:
                    bb.data.setVar("__SKIPPED", True, based)
                darray[cls] = based
            return darray

        # NOTE(review): this pop only mutates the local bbpath list and the
        # result is unused — looks vestigial; confirm before removing.
        bbpath.pop(0)
        if oldfile:
            bb.data.setVar("FILE", oldfile, d)

    # we have parsed the bb class now
    if ext == ".bbclass" or ext == ".inc":
        __parsed_methods__[base_name] = 1

    return d
217 | |||
def feeder(lineno, s, fn, root, d):
    """Consume one (already rstripped) line of a .bb file.

    A state machine driven by the module-level globals: accumulates
    shell/python function bodies, and recognises the addtask, addhandler,
    EXPORT_FUNCTIONS and inherit directives.  Anything unrecognised is
    passed on to ConfHandler.feeder.

    lineno -- line number, or IN_PYTHON_EOF to flush a trailing python def
    root   -- base filename, used as the methodpool key for def bodies
    """
    global __func_start_regexp__, __inherit_regexp__, __export_func_regexp__, __addtask_regexp__, __addhandler_regexp__, __def_regexp__, __python_func_regexp__, __inpython__,__infunc__, __body__, classes, bb, __residue__
    # --- Inside a shell/python function body ('name() {' seen earlier) ---
    if __infunc__:
        if s == '}':
            __body__.append('')
            if __infunc__ == "__anonymous":
                # Anonymous function: register it in the methodpool under a
                # name derived from the line number and sanitised filename.
                funcname = ("__anon_%s_%s" % (lineno, fn.translate(string.maketrans('/.+-', '____'))))
                if not funcname in methodpool._parsed_fns:
                    text = "def %s(d):\n" % (funcname) + '\n'.join(__body__)
                    methodpool.insert_method(funcname, text, fn)
                anonfuncs = data.getVar('__BBANONFUNCS', d) or []
                anonfuncs.append(funcname)
                data.setVar('__BBANONFUNCS', anonfuncs, d)
            else:
                # Named function: store the body as a 'func' variable.
                data.setVarFlag(__infunc__, "func", 1, d)
                data.setVar(__infunc__, '\n'.join(__body__), d)
            __infunc__ = ""
            __body__ = []
        else:
            __body__.append(s)
        return

    # --- Inside a top-level python 'def ...:' block ---
    if __inpython__:
        m = __python_func_regexp__.match(s)
        if m and lineno != IN_PYTHON_EOF:
            __body__.append(s)
            return
        else:
            # Note we will add root to parsedmethods after having parse
            # 'this' file. This means we will not parse methods from
            # bb classes twice
            if not root in __parsed_methods__:
                text = '\n'.join(__body__)
                methodpool.insert_method( root, text, fn )
            __body__ = []
            __inpython__ = False

            if lineno == IN_PYTHON_EOF:
                return

    # fall through

    if s == '' or s[0] == '#': return # skip comments and empty lines

    # Accumulate backslash-continued lines until the final fragment.
    if s[-1] == '\\':
        __residue__.append(s[:-1])
        return

    s = "".join(__residue__) + s
    __residue__ = []

    # Function start: '[python] [fakeroot] name() {'
    m = __func_start_regexp__.match(s)
    if m:
        __infunc__ = m.group("func") or "__anonymous"
        key = __infunc__
        if data.getVar(key, d):
            # clean up old version of this piece of metadata, as its
            # flags could cause problems
            data.setVarFlag(key, 'python', None, d)
            data.setVarFlag(key, 'fakeroot', None, d)
        if m.group("py") is not None:
            data.setVarFlag(key, "python", "1", d)
        else:
            data.delVarFlag(key, "python", d)
        if m.group("fr") is not None:
            data.setVarFlag(key, "fakeroot", "1", d)
        else:
            data.delVarFlag(key, "fakeroot", d)
        return

    # Start of a top-level python 'def ...:' block.
    m = __def_regexp__.match(s)
    if m:
        __body__.append(s)
        __inpython__ = True
        return

    # EXPORT_FUNCTIONS f1 f2 ... : create per-class aliases so a class's
    # do_* implementations are reachable under their unprefixed names.
    m = __export_func_regexp__.match(s)
    if m:
        fns = m.group(1)
        n = __word__.findall(fns)
        for f in n:
            allvars = []
            allvars.append(f)
            allvars.append(classes[-1] + "_" + f)

            vars = [[ allvars[0], allvars[1] ]]
            if len(classes) > 1 and classes[-2] is not None:
                # Nested class: also chain through the parent class name.
                allvars.append(classes[-2] + "_" + f)
                vars = []
                vars.append([allvars[2], allvars[1]])
                vars.append([allvars[0], allvars[2]])

            for (var, calledvar) in vars:
                # Don't clobber a user-defined function of the same name.
                if data.getVar(var, d) and not data.getVarFlag(var, 'export_func', d):
                    continue

                if data.getVar(var, d):
                    data.setVarFlag(var, 'python', None, d)
                    data.setVarFlag(var, 'func', None, d)

                # Propagate func/python flags from the target, and 'dirs'
                # back onto the target.
                for flag in [ "func", "python" ]:
                    if data.getVarFlag(calledvar, flag, d):
                        data.setVarFlag(var, flag, data.getVarFlag(calledvar, flag, d), d)
                for flag in [ "dirs" ]:
                    if data.getVarFlag(var, flag, d):
                        data.setVarFlag(calledvar, flag, data.getVarFlag(var, flag, d), d)

                if data.getVarFlag(calledvar, "python", d):
                    data.setVar(var, "\tbb.build.exec_func('" + calledvar + "', d)\n", d)
                else:
                    data.setVar(var, "\t" + calledvar + "\n", d)
                data.setVarFlag(var, 'export_func', '1', d)

        return

    # addtask <func> [before ...] [after ...]
    m = __addtask_regexp__.match(s)
    if m:
        func = m.group("func")
        before = m.group("before")
        after = m.group("after")
        if func is None:
            return
        # NOTE(review): when func already starts with "do_", 'var' is never
        # assigned and the next line would raise NameError — confirm
        # whether addtask is only ever used with unprefixed names here.
        if func[:3] != "do_":
            var = "do_" + func

        data.setVarFlag(var, "task", 1, d)

        bbtasks = data.getVar('__BBTASKS', d) or []
        if not var in bbtasks:
            bbtasks.append(var)
        data.setVar('__BBTASKS', bbtasks, d)

        existing = data.getVarFlag(var, "deps", d) or []
        if after is not None:
            # set up deps for function
            for entry in after.split():
                if entry not in existing:
                    existing.append(entry)
        data.setVarFlag(var, "deps", existing, d)
        if before is not None:
            # set up things that depend on this func
            for entry in before.split():
                existing = data.getVarFlag(entry, "deps", d) or []
                if var not in existing:
                    data.setVarFlag(entry, "deps", [var] + existing, d)
        return

    # addhandler h1 h2 ... : queue event handlers for registration.
    m = __addhandler_regexp__.match(s)
    if m:
        fns = m.group(1)
        hs = __word__.findall(fns)
        bbhands = data.getVar('__BBHANDLERS', d) or []
        for h in hs:
            bbhands.append(h)
            data.setVarFlag(h, "handler", 1, d)
        data.setVar('__BBHANDLERS', bbhands, d)
        return

    # inherit c1 c2 ...
    m = __inherit_regexp__.match(s)
    if m:

        files = m.group(1)
        n = __word__.findall(files)
        inherit(n, d)
        return

    # Anything else is plain configuration syntax.
    from bb.parse import ConfHandler
    return ConfHandler.feeder(lineno, s, fn, d)
386 | |||
# Cache of filename -> [pn, pv, pr] splits.
__pkgsplit_cache__={}
def vars_from_file(mypkg, d):
    """Split a recipe filename into its default [PN, PV, PR] parts.

    "/path/foo_1.0_r0.bb" -> ["foo", "1.0", "r0"]; missing parts are
    padded with None.  Returns (None, None, None) for an empty name.
    Raises ParseError when the name has more than three '_' parts.

    Bug fix: the result used to be cached *before* validating the part
    count, so a second call with an invalid name silently returned the
    bad split instead of raising.  Validation now happens first.
    """
    if not mypkg:
        return (None, None, None)
    if mypkg in __pkgsplit_cache__:
        return __pkgsplit_cache__[mypkg]

    myfile = os.path.splitext(os.path.basename(mypkg))
    parts = myfile[0].split('_')
    if len(parts) > 3:
        raise ParseError("Unable to generate default variables from the filename: %s (too many underscores)" % mypkg)
    # Pad to exactly three entries.
    parts.extend([None] * (3 - len(parts)))
    __pkgsplit_cache__[mypkg] = parts
    return parts
406 | |||
# Add us to the handlers list
from bb.parse import handlers
handlers.append({'supports': supports, 'handle': handle, 'init': init})
# Drop the local alias so this module does not keep a name shadowing
# bb.parse.handlers.
del handlers
diff --git a/bitbake-dev/lib/bb/parse/parse_py/ConfHandler.py b/bitbake-dev/lib/bb/parse/parse_py/ConfHandler.py deleted file mode 100644 index 23316ada58..0000000000 --- a/bitbake-dev/lib/bb/parse/parse_py/ConfHandler.py +++ /dev/null | |||
@@ -1,241 +0,0 @@ | |||
1 | #!/usr/bin/env python | ||
2 | # ex:ts=4:sw=4:sts=4:et | ||
3 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
4 | """ | ||
5 | class for handling configuration data files | ||
6 | |||
7 | Reads a .conf file and obtains its metadata | ||
8 | |||
9 | """ | ||
10 | |||
11 | # Copyright (C) 2003, 2004 Chris Larson | ||
12 | # Copyright (C) 2003, 2004 Phil Blundell | ||
13 | # | ||
14 | # This program is free software; you can redistribute it and/or modify | ||
15 | # it under the terms of the GNU General Public License version 2 as | ||
16 | # published by the Free Software Foundation. | ||
17 | # | ||
18 | # This program is distributed in the hope that it will be useful, | ||
19 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
20 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
21 | # GNU General Public License for more details. | ||
22 | # | ||
23 | # You should have received a copy of the GNU General Public License along | ||
24 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
25 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
26 | |||
27 | import re, bb.data, os, sys | ||
28 | from bb.parse import ParseError | ||
29 | |||
#__config_regexp__ = re.compile( r"(?P<exp>export\s*)?(?P<var>[a-zA-Z0-9\-_+.${}]+)\s*(?P<colon>:)?(?P<ques>\?)?=\s*(?P<apo>['\"]?)(?P<value>.*)(?P=apo)$")
# Variable assignment: optional 'export', optional [flag] suffix, and one
# of the operators := ?= += =+ =. .= or plain =, with an optionally
# quoted value.
__config_regexp__  = re.compile( r"(?P<exp>export\s*)?(?P<var>[a-zA-Z0-9\-_+.${}/]+)(\[(?P<flag>[a-zA-Z0-9\-_+.]+)\])?\s*((?P<colon>:=)|(?P<ques>\?=)|(?P<append>\+=)|(?P<prepend>=\+)|(?P<predot>=\.)|(?P<postdot>\.=)|=)\s*(?P<apo>['\"]?)(?P<value>.*)(?P=apo)$")
__include_regexp__ = re.compile( r"include\s+(.+)" )
__require_regexp__ = re.compile( r"require\s+(.+)" )
__export_regexp__ = re.compile( r"export\s+(.+)" )
35 | |||
def init(data):
    """Ensure TOPDIR and BBPATH are set in the datastore.

    TOPDIR defaults to the current working directory; BBPATH defaults to
    TOPDIR plus the installed bitbake data directories.
    """
    topdir = bb.data.getVar('TOPDIR', data) or os.getcwd()
    bb.data.setVar('TOPDIR', topdir, data)
    if not bb.data.getVar('BBPATH', data):
        # Locate the installed bitbake share directories via setuptools.
        from pkg_resources import Requirement, resource_filename
        bitbake = Requirement.parse("bitbake")
        datadir = resource_filename(bitbake, "../share/bitbake")
        basedir = resource_filename(bitbake, "..")
        bb.data.setVar('BBPATH', '%s:%s:%s' % (topdir, datadir, basedir), data)
47 | |||
48 | |||
def supports(fn, d):
    """Return True when fn (after URL-to-local resolution) is a .conf file."""
    return localpath(fn, d).endswith(".conf")
51 | |||
def localpath(fn, d):
    """Map fn to a local path.

    Existing files and non-URL names are returned unchanged; URLs are
    resolved via the fetcher, falling back to fn on malformed URLs or
    when no local path is available.
    """
    # Plain local name (exists, or has no URL scheme): nothing to do.
    if os.path.exists(fn) or "://" not in fn:
        return fn

    resolved = None
    try:
        resolved = bb.fetch.localpath(fn, d, False)
    except bb.MalformedUrl:
        resolved = None

    return resolved or fn
68 | |||
def obtain(fn, data):
    """Fetch fn if it is a remote URL and return the local path.

    All fetch problems are logged and swallowed — the (expanded) local
    name is returned regardless, so callers see a best-effort result.
    """
    import sys, bb
    fn = bb.data.expand(fn, data)
    localfn = bb.data.expand(localpath(fn, data), data)

    # localfn != fn means fn is a URL that maps to a download location.
    if localfn != fn:
        dldir = bb.data.getVar('DL_DIR', data, 1)
        if not dldir:
            bb.msg.debug(1, bb.msg.domain.Parsing, "obtain: DL_DIR not defined")
            return localfn
        bb.mkdirhier(dldir)
        try:
            bb.fetch.init([fn], data)
        except bb.fetch.NoMethodError:
            (type, value, traceback) = sys.exc_info()
            bb.msg.debug(1, bb.msg.domain.Parsing, "obtain: no method: %s" % value)
            return localfn

        try:
            bb.fetch.go(data)
        except bb.fetch.MissingParameterError:
            (type, value, traceback) = sys.exc_info()
            bb.msg.debug(1, bb.msg.domain.Parsing, "obtain: missing parameters: %s" % value)
            return localfn
        except bb.fetch.FetchError:
            (type, value, traceback) = sys.exc_info()
            bb.msg.debug(1, bb.msg.domain.Parsing, "obtain: failed: %s" % value)
            return localfn
    return localfn
98 | |||
99 | |||
def include(oldfn, fn, data, error_out):
    """Parse file fn as an include of file oldfn.

    error_out -- if truthy, raise ParseError when fn cannot be parsed;
                 the value itself is interpolated into the error message
                 (e.g. "inherit", "include required").  If falsy, a
                 missing file is merely logged.
    """
    if oldfn == fn: # prevent infinite recursion
        return None

    import bb
    fn = bb.data.expand(fn, data)
    oldfn = bb.data.expand(oldfn, data)

    if not os.path.isabs(fn):
        # Search relative to the including file's directory first, then
        # along BBPATH.
        dname = os.path.dirname(oldfn)
        bbpath = "%s:%s" % (dname, bb.data.getVar("BBPATH", data, 1))
        abs_fn = bb.which(bbpath, fn)
        if abs_fn:
            fn = abs_fn

    from bb.parse import handle
    try:
        ret = handle(fn, data, True)
    except IOError:
        if error_out:
            raise ParseError("Could not %(error_out)s file %(fn)s" % vars() )
        bb.msg.debug(2, bb.msg.domain.Parsing, "CONF file '%s' not found" % fn)
126 | |||
def handle(fn, data, include = 0):
    """Parse configuration file fn into the datastore.

    include -- non-zero when fn is being included from another file; the
               including file's FILE variable is restored afterwards.
    Returns the datastore.
    """
    if include:
        inc_string = "including"
    else:
        inc_string = "reading"
    init(data)

    if include == 0:
        oldfile = None
    else:
        oldfile = bb.data.getVar('FILE', data)

    # Resolve remote URLs, then search BBPATH for relative names.
    fn = obtain(fn, data)
    if not os.path.isabs(fn):
        f = None
        # NOTE(review): the 'or []' fallback would make the .split(":")
        # below raise AttributeError if BBPATH were unset — presumably
        # init() always guarantees BBPATH by this point; confirm.
        bbpath = bb.data.getVar("BBPATH", data, 1) or []
        for p in bbpath.split(":"):
            currname = os.path.join(p, fn)
            if os.access(currname, os.R_OK):
                f = open(currname, 'r')
                abs_fn = currname
                bb.msg.debug(2, bb.msg.domain.Parsing, "CONF %s %s" % (inc_string, currname))
                break
        if f is None:
            raise IOError("file '%s' not found" % fn)
    else:
        f = open(fn,'r')
        bb.msg.debug(1, bb.msg.domain.Parsing, "CONF %s %s" % (inc_string,fn))
        abs_fn = fn

    if include:
        bb.parse.mark_dependency(data, abs_fn)

    # Feed the file line by line, joining backslash continuations.
    lineno = 0
    bb.data.setVar('FILE', fn, data)
    while 1:
        lineno = lineno + 1
        s = f.readline()
        if not s: break
        w = s.strip()
        if not w: continue # skip empty lines
        s = s.rstrip()
        if s[0] == '#': continue # skip comments
        while s[-1] == '\\':
            s2 = f.readline()[:-1].strip()
            lineno = lineno + 1
            s = s[:-1] + s2
        feeder(lineno, s, fn, data)

    if oldfile:
        bb.data.setVar('FILE', oldfile, data)
    return data
179 | |||
def feeder(lineno, s, fn, data):
    """Interpret one logical configuration line.

    Handles variable assignments (with the := ?= += =+ =. .= and plain =
    operators, optionally on a [flag]), plus the include/require/export
    directives.  Raises ParseError for anything else.
    """
    def getFunc(groupd, key, data):
        # Read the current value of either the flag or the variable,
        # depending on whether a [flag] suffix was matched.
        if 'flag' in groupd and groupd['flag'] != None:
            return bb.data.getVarFlag(key, groupd['flag'], data)
        else:
            return bb.data.getVar(key, data)

    m = __config_regexp__.match(s)
    if m:
        groupd = m.groupdict()
        key = groupd["var"]
        if "exp" in groupd and groupd["exp"] != None:
            bb.data.setVarFlag(key, "export", 1, data)
        if "ques" in groupd and groupd["ques"] != None:
            # ?= only assigns when no value exists yet.
            val = getFunc(groupd, key, data)
            if val == None:
                val = groupd["value"]
        elif "colon" in groupd and groupd["colon"] != None:
            # := assigns the value expanded immediately, against a copy.
            e = data.createCopy()
            bb.data.update_data(e)
            val = bb.data.expand(groupd["value"], e)
        elif "append" in groupd and groupd["append"] != None:
            # += appends with a separating space.
            val = "%s %s" % ((getFunc(groupd, key, data) or ""), groupd["value"])
        elif "prepend" in groupd and groupd["prepend"] != None:
            # =+ prepends with a separating space.
            val = "%s %s" % (groupd["value"], (getFunc(groupd, key, data) or ""))
        elif "postdot" in groupd and groupd["postdot"] != None:
            # .= appends with no separator.
            val = "%s%s" % ((getFunc(groupd, key, data) or ""), groupd["value"])
        elif "predot" in groupd and groupd["predot"] != None:
            # =. prepends with no separator.
            val = "%s%s" % (groupd["value"], (getFunc(groupd, key, data) or ""))
        else:
            val = groupd["value"]
        if 'flag' in groupd and groupd['flag'] != None:
            bb.msg.debug(3, bb.msg.domain.Parsing, "setVarFlag(%s, %s, %s, data)" % (key, groupd['flag'], val))
            bb.data.setVarFlag(key, groupd['flag'], val, data)
        else:
            bb.data.setVar(key, val, data)
        return

    # include <file>: best effort, missing files only logged.
    m = __include_regexp__.match(s)
    if m:
        s = bb.data.expand(m.group(1), data)
        bb.msg.debug(3, bb.msg.domain.Parsing, "CONF %s:%d: including %s" % (fn, lineno, s))
        include(fn, s, data, False)
        return

    # require <file>: like include, but a missing file is fatal.
    m = __require_regexp__.match(s)
    if m:
        s = bb.data.expand(m.group(1), data)
        include(fn, s, data, "include required")
        return

    # export <var>
    m = __export_regexp__.match(s)
    if m:
        bb.data.setVarFlag(m.group(1), "export", 1, data)
        return

    raise ParseError("%s:%d: unparsed line: '%s'" % (fn, lineno, s));
237 | |||
# Add us to the handlers list
from bb.parse import handlers
handlers.append({'supports': supports, 'handle': handle, 'init': init})
# Drop the local alias so this module does not keep a name shadowing
# bb.parse.handlers.
del handlers
diff --git a/bitbake-dev/lib/bb/parse/parse_py/__init__.py b/bitbake-dev/lib/bb/parse/parse_py/__init__.py deleted file mode 100644 index 9e0e00adda..0000000000 --- a/bitbake-dev/lib/bb/parse/parse_py/__init__.py +++ /dev/null | |||
@@ -1,33 +0,0 @@ | |||
1 | #!/usr/bin/env python | ||
2 | # ex:ts=4:sw=4:sts=4:et | ||
3 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
4 | """ | ||
5 | BitBake Parsers | ||
6 | |||
7 | File parsers for the BitBake build tools. | ||
8 | |||
9 | """ | ||
10 | |||
11 | # Copyright (C) 2003, 2004 Chris Larson | ||
12 | # Copyright (C) 2003, 2004 Phil Blundell | ||
13 | # | ||
14 | # This program is free software; you can redistribute it and/or modify | ||
15 | # it under the terms of the GNU General Public License version 2 as | ||
16 | # published by the Free Software Foundation. | ||
17 | # | ||
18 | # This program is distributed in the hope that it will be useful, | ||
19 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
20 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
21 | # GNU General Public License for more details. | ||
22 | # | ||
23 | # You should have received a copy of the GNU General Public License along | ||
24 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
25 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
26 | # | ||
27 | # Based on functions from the base bb module, Copyright 2003 Holger Schurig | ||
__version__ = '1.0'

__all__ = [ 'ConfHandler', 'BBHandler']

# Python 2 implicit relative imports; importing each handler module
# registers it with bb.parse.handlers as a side effect.
import ConfHandler
import BBHandler
diff --git a/bitbake-dev/lib/bb/persist_data.py b/bitbake-dev/lib/bb/persist_data.py deleted file mode 100644 index bc4045fe85..0000000000 --- a/bitbake-dev/lib/bb/persist_data.py +++ /dev/null | |||
@@ -1,121 +0,0 @@ | |||
1 | # BitBake Persistent Data Store | ||
2 | # | ||
3 | # Copyright (C) 2007 Richard Purdie | ||
4 | # | ||
5 | # This program is free software; you can redistribute it and/or modify | ||
6 | # it under the terms of the GNU General Public License version 2 as | ||
7 | # published by the Free Software Foundation. | ||
8 | # | ||
9 | # This program is distributed in the hope that it will be useful, | ||
10 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
11 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
12 | # GNU General Public License for more details. | ||
13 | # | ||
14 | # You should have received a copy of the GNU General Public License along | ||
15 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
16 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
17 | |||
18 | import bb, os | ||
19 | |||
20 | try: | ||
21 | import sqlite3 | ||
22 | except ImportError: | ||
23 | try: | ||
24 | from pysqlite2 import dbapi2 as sqlite3 | ||
25 | except ImportError: | ||
26 | bb.msg.fatal(bb.msg.domain.PersistData, "Importing sqlite3 and pysqlite2 failed, please install one of them. Python 2.5 or a 'python-pysqlite2' like package is likely to be what you need.") | ||
27 | |||
28 | sqlversion = sqlite3.sqlite_version_info | ||
29 | if sqlversion[0] < 3 or (sqlversion[0] == 3 and sqlversion[1] < 3): | ||
30 | bb.msg.fatal(bb.msg.domain.PersistData, "sqlite3 version 3.3.0 or later is required.") | ||
31 | |||
class PersistData:
    """
    BitBake Persistent Data Store

    Used to store data in a central location such that other threads/tasks can
    access them at some future date.

    The "domain" is used as a key to isolate each data pool and in this
    implementation corresponds to an SQL table. The SQL table consists of a
    simple key and value pair.

    Why sqlite? It handles all the locking issues for us.

    SECURITY NOTE: domain names are interpolated directly into the SQL
    (table names cannot be bound parameters), so callers must only pass
    trusted, identifier-like domain strings.
    """
    def __init__(self, d):
        # Cache location: PERSISTENT_DIR wins, CACHE is the fallback.
        self.cachedir = bb.data.getVar("PERSISTENT_DIR", d, True) or bb.data.getVar("CACHE", d, True)
        if self.cachedir in [None, '']:
            bb.msg.fatal(bb.msg.domain.PersistData, "Please set the 'PERSISTENT_DIR' or 'CACHE' variable.")
        try:
            os.stat(self.cachedir)
        except OSError:
            bb.mkdirhier(self.cachedir)

        self.cachefile = os.path.join(self.cachedir, "bb_persist_data.sqlite3")
        bb.msg.debug(1, bb.msg.domain.PersistData, "Using '%s' as the persistent data cache" % self.cachefile)

        # isolation_level=None -> autocommit; sqlite does the locking.
        self.connection = sqlite3.connect(self.cachefile, timeout=5, isolation_level=None)

    def addDomain(self, domain):
        """
        Should be called before any domain is used.
        Creates it if it doesn't exist.

        Fix: routed through _execute() (like the other mutators) so a
        concurrent writer holding the lock triggers a retry rather than
        an immediate OperationalError.
        """
        self._execute("CREATE TABLE IF NOT EXISTS %s(key TEXT, value TEXT);" % domain)

    def delDomain(self, domain):
        """
        Removes a domain and all the data it contains.
        Routed through _execute() for lock-retry consistency.
        """
        self._execute("DROP TABLE IF EXISTS %s;" % domain)

    def getKeyValues(self, domain):
        """
        Return a dict of all key/value pairs stored for a domain.
        """
        ret = {}
        data = self.connection.execute("SELECT key, value from %s;" % domain)
        for row in data:
            ret[str(row[0])] = str(row[1])

        return ret

    def getValue(self, domain, key):
        """
        Return the value of a key for a domain, or None when absent.
        """
        data = self.connection.execute("SELECT * from %s where key=?;" % domain, [key])
        for row in data:
            return row[1]
        # Explicit None instead of falling off the end.
        return None

    def setValue(self, domain, key, value):
        """
        Sets the value of a key for a domain (insert-or-update).
        """
        data = self.connection.execute("SELECT * from %s where key=?;" % domain, [key])
        rows = 0
        for row in data:
            rows = rows + 1
        if rows:
            self._execute("UPDATE %s SET value=? WHERE key=?;" % domain, [value, key])
        else:
            self._execute("INSERT into %s(key, value) values (?, ?);" % domain, [key, value])

    def delValue(self, domain, key):
        """
        Deletes a key/value pair.
        """
        self._execute("DELETE from %s where key=?;" % domain, [key])

    def _execute(self, *query):
        """Execute a statement, retrying for as long as the DB is locked."""
        while True:
            try:
                self.connection.execute(*query)
                return
            # 'except E as e' (PEP 3110) instead of the Python-2-only
            # 'except E, e' spelling; valid on Python 2.6+ and 3.
            except sqlite3.OperationalError as e:
                if 'database is locked' in str(e):
                    continue
                raise
120 | |||
121 | |||
diff --git a/bitbake-dev/lib/bb/providers.py b/bitbake-dev/lib/bb/providers.py deleted file mode 100644 index 8617251ca3..0000000000 --- a/bitbake-dev/lib/bb/providers.py +++ /dev/null | |||
@@ -1,327 +0,0 @@ | |||
1 | # ex:ts=4:sw=4:sts=4:et | ||
2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
3 | # | ||
4 | # Copyright (C) 2003, 2004 Chris Larson | ||
5 | # Copyright (C) 2003, 2004 Phil Blundell | ||
6 | # Copyright (C) 2003 - 2005 Michael 'Mickey' Lauer | ||
7 | # Copyright (C) 2005 Holger Hans Peter Freyther | ||
8 | # Copyright (C) 2005 ROAD GmbH | ||
9 | # Copyright (C) 2006 Richard Purdie | ||
10 | # | ||
11 | # This program is free software; you can redistribute it and/or modify | ||
12 | # it under the terms of the GNU General Public License version 2 as | ||
13 | # published by the Free Software Foundation. | ||
14 | # | ||
15 | # This program is distributed in the hope that it will be useful, | ||
16 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
17 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
18 | # GNU General Public License for more details. | ||
19 | # | ||
20 | # You should have received a copy of the GNU General Public License along | ||
21 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
22 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
23 | |||
24 | import re | ||
25 | from bb import data, utils | ||
26 | import bb | ||
27 | |||
class NoProvider(Exception):
    """Exception raised when no provider of a build dependency can be found"""
    # Build-time counterpart of NoRProvider; nothing in the visible code of
    # this module raises it, so it is part of the module's public API.
30 | |||
class NoRProvider(Exception):
    """Exception raised when no provider of a runtime dependency can be found"""
    # Runtime counterpart of NoProvider; also part of the public API rather
    # than raised from the visible code in this module.
33 | |||
34 | |||
def sortPriorities(pn, dataCache, pkg_pn = None):
    """
    Reorder pkg_pn by file priority and default preference

    Returns a list of lists of provider filenames: the outer list runs from
    highest to lowest bbfile priority, and within each priority the files
    are ordered by descending DEFAULT_PREFERENCE (pkg_dp).
    """

    if not pkg_pn:
        pkg_pn = dataCache.pkg_pn

    files = pkg_pn[pn]
    priorities = {}
    for f in files:
        priority = dataCache.bbfile_priority[f]
        preference = dataCache.pkg_dp[f]
        if priority not in priorities:
            priorities[priority] = {}
        if preference not in priorities[priority]:
            priorities[priority][preference] = []
        priorities[priority][preference].append(f)

    # The original used cmp-style list.sort(lambda a, b: ...) on dict.keys(),
    # which raises under Python 3; key-based sorted() produces the identical
    # integer ordering on both Python 2 and 3.
    tmp_pn = []
    for pri in sorted(priorities):
        tmp_pref = []
        for pref in sorted(priorities[pri], reverse = True):
            tmp_pref.extend(priorities[pri][pref])
        # Prepend so the highest priority group ends up first overall.
        tmp_pn = [tmp_pref] + tmp_pn

    return tmp_pn
65 | |||
def preferredVersionMatch(pe, pv, pr, preferred_e, preferred_v, preferred_r):
    """
    Check if the version pe,pv,pr is the preferred one.

    A preferred epoch/revision of None acts as a wildcard; a preferred
    version ending in '%' matches any pv starting with the part before
    the '%'.
    """
    # Guard clauses: a concrete preferred revision/epoch must match exactly.
    if preferred_r is not None and pr != preferred_r:
        return False
    if preferred_e is not None and pe != preferred_e:
        return False
    if pv == preferred_v:
        return True
    if preferred_v is not None and preferred_v.endswith('%'):
        return pv.startswith(preferred_v[:-1])
    return False
78 | |||
def findPreferredProvider(pn, cfgData, dataCache, pkg_pn = None, item = None):
    """
    Find the first provider in pkg_pn with a PREFERRED_VERSION set.

    Returns (preferred_ver, preferred_file) where preferred_ver is a
    (pe, pv, pr) tuple; both elements are None when nothing matches.
    """

    preferred_file = None
    preferred_ver = None

    # Evaluate PREFERRED_VERSION_<pn> with per-recipe overrides applied.
    localdata = data.createCopy(cfgData)
    bb.data.setVar('OVERRIDES', "pn-%s:%s:%s" % (pn, pn, data.getVar('OVERRIDES', localdata)), localdata)
    bb.data.update_data(localdata)

    preferred_v = bb.data.getVar('PREFERRED_VERSION_%s' % pn, localdata, True)
    if preferred_v:
        # Split "epoch:version_revision". The original pattern
        # '(\d+:)*(.*)(_.*)*' could never capture the revision because the
        # greedy (.*) consumed it; a lazy version group with an anchored,
        # optional _revision suffix fixes "pv_pr" style preferences.
        m = re.match('(\d+:)?(.*?)(_.*)?$', preferred_v)
        if m:
            if m.group(1):
                preferred_e = int(m.group(1)[:-1])
            else:
                preferred_e = None
            preferred_v = m.group(2)
            if m.group(3):
                preferred_r = m.group(3)[1:]
            else:
                preferred_r = None
        else:
            preferred_e = None
            preferred_r = None

        for file_set in pkg_pn:
            for f in file_set:
                pe,pv,pr = dataCache.pkg_pepvpr[f]
                if preferredVersionMatch(pe, pv, pr, preferred_e, preferred_v, preferred_r):
                    preferred_file = f
                    preferred_ver = (pe, pv, pr)
                    break
            if preferred_file:
                break
        if preferred_r:
            pv_str = '%s-%s' % (preferred_v, preferred_r)
        else:
            pv_str = preferred_v
        if not (preferred_e is None):
            pv_str = '%s:%s' % (preferred_e, pv_str)
        itemstr = ""
        if item:
            itemstr = " (for item %s)" % item
        if preferred_file is None:
            bb.msg.note(1, bb.msg.domain.Provider, "preferred version %s of %s not available%s" % (pv_str, pn, itemstr))
        else:
            bb.msg.debug(1, bb.msg.domain.Provider, "selecting %s as PREFERRED_VERSION %s of package %s%s" % (preferred_file, pv_str, pn, itemstr))

    return (preferred_ver, preferred_file)
132 | |||
133 | |||
def findLatestProvider(pn, cfgData, dataCache, file_set):
    """
    Return the highest version of the providers in file_set.
    Take default preferences into account.

    Returns (version, filename) where version is a (pe, pv, pr) tuple,
    or (None, None) for an empty file_set.
    """
    best = None        # (pe, pv, pr) of the current winner
    best_fn = None     # filename of the current winner
    best_pref = 0      # DEFAULT_PREFERENCE of the current winner
    for fn in file_set:
        version = dataCache.pkg_pepvpr[fn]
        pref = dataCache.pkg_dp[fn]

        # A higher DEFAULT_PREFERENCE always wins; at equal preference the
        # newer version (per utils.vercmp) wins.
        if best is None or pref > best_pref or \
           (pref == best_pref and utils.vercmp(best, version) < 0):
            best = version
            best_fn = fn
            best_pref = pref

    return (best, best_fn)
152 | |||
153 | |||
def findBestProvider(pn, cfgData, dataCache, pkg_pn = None, item = None):
    """
    If there is a PREFERRED_VERSION, find the highest-priority bbfile
    providing that version. If not, find the latest version provided by
    an bbfile in the highest-priority set.

    Returns (latest, latest_file, preferred_ver, preferred_file).
    """

    sorted_sets = sortPriorities(pn, dataCache, pkg_pn)
    # Highest-priority provider carrying a PREFERRED_VERSION, if any
    preferred_ver, preferred_file = findPreferredProvider(pn, cfgData, dataCache, sorted_sets, item)
    # Newest version within the highest-priority set
    latest, latest_f = findLatestProvider(pn, cfgData, dataCache, sorted_sets[0])

    # No explicit preference matched: fall back to the latest version.
    if preferred_file is None:
        preferred_file = latest_f
        preferred_ver = latest

    return (latest, latest_f, preferred_ver, preferred_file)
172 | |||
173 | |||
def _filterProviders(providers, item, cfgData, dataCache):
    """
    Take a list of providers and filter/reorder according to the
    environment variables and previous build results

    Returns the list of eligible provider filenames, best candidate first
    ([] when nothing is eligible).
    """
    eligible = []
    preferred_versions = {}
    sortpkg_pn = {}

    # The order of providers depends on the order of the files on the disk
    # up to here. Sort pkg_pn to make dependency issues reproducible rather
    # than effectively random.
    providers.sort()

    # Collate providers by PN
    pkg_pn = {}
    for p in providers:
        pn = dataCache.pkg_fn[p]
        if pn not in pkg_pn:
            pkg_pn[pn] = []
        pkg_pn[pn].append(p)

    bb.msg.debug(1, bb.msg.domain.Provider, "providers for %s are: %s" % (item, pkg_pn.keys()))

    # First add PREFERRED_VERSIONS
    for pn in pkg_pn.keys():
        sortpkg_pn[pn] = sortPriorities(pn, dataCache, pkg_pn)
        preferred_versions[pn] = findPreferredProvider(pn, cfgData, dataCache, sortpkg_pn[pn], item)
        if preferred_versions[pn][1]:
            eligible.append(preferred_versions[pn][1])

    # Now add latest versions
    for pn in sortpkg_pn.keys():
        if pn in preferred_versions and preferred_versions[pn][1]:
            continue
        preferred_versions[pn] = findLatestProvider(pn, cfgData, dataCache, sortpkg_pn[pn][0])
        eligible.append(preferred_versions[pn][1])

    if len(eligible) == 0:
        bb.msg.error(bb.msg.domain.Provider, "no eligible providers for %s" % item)
        # Return an empty list rather than the integer 0 the original
        # returned here: callers (filterProviders*) iterate the result, and
        # "for p in 0" raises TypeError.
        return []

    # If pn == item, give it a slight default preference
    # This means PREFERRED_PROVIDER_foobar defaults to foobar if available
    for p in providers:
        pn = dataCache.pkg_fn[p]
        if pn != item:
            continue
        (newvers, fn) = preferred_versions[pn]
        if not fn in eligible:
            continue
        eligible.remove(fn)
        eligible = [fn] + eligible

    return eligible
229 | |||
230 | |||
def filterProviders(providers, item, cfgData, dataCache):
    """
    Take a list of providers and filter/reorder according to the
    environment variables and previous build results
    Takes a "normal" target item

    Returns (eligible, foundUnique): the provider list with any
    PREFERRED_PROVIDER winner moved to the front, and a flag saying
    whether such a winner was found.
    """

    eligible = _filterProviders(providers, item, cfgData, dataCache)

    # Record a configured PREFERRED_PROVIDER for this item, if any.
    prefervar = bb.data.getVar('PREFERRED_PROVIDER_%s' % item, cfgData, 1)
    if prefervar:
        dataCache.preferred[item] = prefervar

    foundUnique = False
    if item in dataCache.preferred:
        wanted = dataCache.preferred[item]
        for p in eligible:
            if dataCache.pkg_fn[p] == wanted:
                bb.msg.note(2, bb.msg.domain.Provider, "selecting %s to satisfy %s due to PREFERRED_PROVIDERS" % (wanted, item))
                # Move the preferred provider to the head of the list.
                eligible.remove(p)
                eligible = [p] + eligible
                foundUnique = True
                break

    bb.msg.debug(1, bb.msg.domain.Provider, "sorted providers for %s are: %s" % (item, eligible))

    return eligible, foundUnique
258 | |||
def filterProvidersRunTime(providers, item, cfgData, dataCache):
    """
    Take a list of providers and filter/reorder according to the
    environment variables and previous build results
    Takes a "runtime" target item

    Returns (eligible, numberPreferred): providers best-first, plus how many
    of them were promoted by PREFERRED_PROVIDER settings (more than one is
    reported as a conflict).
    """

    eligible = _filterProviders(providers, item, cfgData, dataCache)

    # Should use dataCache.preferred here?
    preferred = []
    preferred_vars = []
    # Iterate over a snapshot: the body removes the current element from
    # 'eligible' and rebinds the name, and mutating the list being iterated
    # made the original silently skip the provider that followed each match.
    for p in eligible[:]:
        pn = dataCache.pkg_fn[p]
        provides = dataCache.pn_provides[pn]
        for provide in provides:
            bb.msg.note(2, bb.msg.domain.Provider, "checking PREFERRED_PROVIDER_%s" % (provide))
            prefervar = bb.data.getVar('PREFERRED_PROVIDER_%s' % provide, cfgData, 1)
            if prefervar == pn:
                var = "PREFERRED_PROVIDER_%s = %s" % (provide, prefervar)
                bb.msg.note(2, bb.msg.domain.Provider, "selecting %s to satisfy runtime %s due to %s" % (pn, item, var))
                preferred_vars.append(var)
                # Promote this provider to the head of the eligible list.
                eligible.remove(p)
                eligible = [p] + eligible
                preferred.append(p)
                break

    numberPreferred = len(preferred)

    if numberPreferred > 1:
        bb.msg.error(bb.msg.domain.Provider, "Conflicting PREFERRED_PROVIDER entries were found which resulted in an attempt to select multiple providers (%s) for runtime dependecy %s\nThe entries resulting in this conflict were: %s" % (preferred, item, preferred_vars))

    bb.msg.debug(1, bb.msg.domain.Provider, "sorted providers for %s are: %s" % (item, eligible))

    return eligible, numberPreferred
294 | |||
# Cache of compiled PACKAGES_DYNAMIC patterns keyed by the (plus-escaped)
# pattern string; filled and consulted by getRuntimeProviders() below.
regexp_cache = {}
296 | |||
def getRuntimeProviders(dataCache, rdepend):
    """
    Return any providers of runtime dependency

    Direct RPROVIDES and PACKAGES entries are checked first; the dynamic
    package patterns are only consulted when neither yields anything.
    """
    rproviders = []

    if rdepend in dataCache.rproviders:
        rproviders = rproviders + dataCache.rproviders[rdepend]

    if rdepend in dataCache.packages:
        rproviders = rproviders + dataCache.packages[rdepend]

    if rproviders:
        return rproviders

    # Only search dynamic packages if we can't find anything in other variables
    for pattern in dataCache.packages_dynamic:
        pattern = pattern.replace('+', "\+")
        regexp = regexp_cache.get(pattern)
        if regexp is None:
            try:
                regexp = re.compile(pattern)
            except:
                bb.msg.error(bb.msg.domain.Provider, "Error parsing re expression: %s" % pattern)
                raise
            regexp_cache[pattern] = regexp
        if regexp.match(rdepend):
            rproviders = rproviders + dataCache.packages_dynamic[pattern]

    return rproviders
diff --git a/bitbake-dev/lib/bb/runqueue.py b/bitbake-dev/lib/bb/runqueue.py deleted file mode 100644 index c3ad442e47..0000000000 --- a/bitbake-dev/lib/bb/runqueue.py +++ /dev/null | |||
@@ -1,1174 +0,0 @@ | |||
1 | #!/usr/bin/env python | ||
2 | # ex:ts=4:sw=4:sts=4:et | ||
3 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
4 | """ | ||
5 | BitBake 'RunQueue' implementation | ||
6 | |||
7 | Handles preparation and execution of a queue of tasks | ||
8 | """ | ||
9 | |||
10 | # Copyright (C) 2006-2007 Richard Purdie | ||
11 | # | ||
12 | # This program is free software; you can redistribute it and/or modify | ||
13 | # it under the terms of the GNU General Public License version 2 as | ||
14 | # published by the Free Software Foundation. | ||
15 | # | ||
16 | # This program is distributed in the hope that it will be useful, | ||
17 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
18 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
19 | # GNU General Public License for more details. | ||
20 | # | ||
21 | # You should have received a copy of the GNU General Public License along | ||
22 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
23 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
24 | |||
25 | from bb import msg, data, event, mkdirhier, utils | ||
26 | import bb, os, sys | ||
27 | import signal | ||
28 | import stat | ||
29 | |||
class TaskFailure(Exception):
    """Exception raised when a task in a runqueue fails"""
    def __init__(self, x):
        # Store the payload directly in args, the standard Exception slot,
        # so it survives str()/repr() and pickling like any exception args.
        self.args = x
34 | |||
35 | |||
class RunQueueStats:
    """
    Holds statistics on the tasks handled by the associated runQueue
    """
    def __init__(self, total):
        # All counters start at zero; 'total' is the overall task count.
        self.completed = 0
        self.skipped = 0
        self.failed = 0
        self.active = 0
        self.total = total

    def taskFailed(self):
        self.active -= 1
        self.failed += 1

    def taskCompleted(self, number = 1):
        self.active -= number
        self.completed += number

    def taskSkipped(self, number = 1):
        # Skipped tasks are also added to the active count here, matching
        # the accounting the rest of the runqueue expects.
        self.active += number
        self.skipped += number

    def taskActive(self):
        self.active += 1
61 | |||
# These values indicate the next step due to be run in the
# runQueue state machine: RunQueue.state holds one of them and
# reset_runqueue() starts the machine off in runQueuePrepare.
# (Note: 5 is unused; only the identity of each value matters.)
runQueuePrepare = 2
runQueueRunInit = 3
runQueueRunning = 4
runQueueFailed = 6
runQueueCleanUp = 7
runQueueComplete = 8
runQueueChildProcess = 9
71 | |||
class RunQueueScheduler:
    """
    Control the order tasks are scheduled in.
    """
    def __init__(self, runqueue):
        """
        The default scheduler just returns the first buildable task (the
        priority map is simply the identity: index == task number).
        """
        self.rq = runqueue
        self.prio_map = list(range(len(self.rq.runq_fnid)))

    def next(self):
        """
        Return the id of the first task we find that is buildable
        (implicitly None when no task is currently runnable).
        """
        for tid in self.prio_map:
            if self.rq.runq_running[tid] == 1:
                continue
            if self.rq.runq_buildable[tid] == 1:
                return tid
97 | |||
class RunQueueSchedulerSpeed(RunQueueScheduler):
    """
    A scheduler optimised for speed. The priority map is sorted by task weight,
    heavier weighted tasks (tasks needed by the most other tasks) are run first.
    """
    def __init__(self, runqueue):
        """
        The priority map is sorted by task weight.

        The original built the map with deepcopy plus a repeated
        list.index() scan, which is O(n^2); a single stable sort yields the
        identical ordering (ascending-weight stable sort keeps ties in
        ascending task id, so the final reverse leaves ties in descending
        id, exactly as before) in O(n log n).
        """
        self.rq = runqueue

        weights = self.rq.runq_weight
        order = sorted(range(len(weights)), key = lambda idx: weights[idx])
        order.reverse()
        self.prio_map = order
122 | |||
class RunQueueSchedulerCompletion(RunQueueSchedulerSpeed):
    """
    A scheduler optimised to complete .bb files as quickly as possible. The
    priority map is sorted by task weight, but then reordered so once a given
    .bb file starts to build, its completed as quickly as possible. This works
    well where disk space is at a premium and classes like OE's rm_work are in
    force.
    """
    def __init__(self, runqueue):
        """
        Start from the speed-sorted map, then pull every task belonging to
        the same .bb file (fnid) up next to the first task of that file.
        """
        RunQueueSchedulerSpeed.__init__(self, runqueue)

        #FIXME - whilst this groups all fnids together it does not reorder the
        #fnid groups optimally.

        remaining = self.prio_map
        self.prio_map = []
        while remaining:
            # Take the next task, then sweep up all later tasks from the same
            # file in one pass (the original rescanned with list.index() and
            # a deferred-delete list, which was quadratic and fragile).
            entry = remaining.pop(0)
            self.prio_map.append(entry)
            fnid = self.rq.runq_fnid[entry]
            keep = []
            for other in remaining:
                if self.rq.runq_fnid[other] == fnid:
                    self.prio_map.append(other)
                else:
                    keep.append(other)
            remaining = keep
153 | |||
154 | class RunQueue: | ||
155 | """ | ||
156 | BitBake Run Queue implementation | ||
157 | """ | ||
    def __init__(self, cooker, cfgData, dataCache, taskData, targets):
        # Clear all runqueue task arrays and enter the runQueuePrepare state.
        self.reset_runqueue()
        self.cooker = cooker
        self.dataCache = dataCache
        self.taskData = taskData
        self.cfgData = cfgData
        self.targets = targets

        # Tunables read from the configuration metadata, with defaults:
        # parallel task count, providers allowed to overlap, the scheduler
        # name (default "speed") and the stamp checking policy/whitelist.
        self.number_tasks = int(bb.data.getVar("BB_NUMBER_THREADS", cfgData, 1) or 1)
        self.multi_provider_whitelist = (bb.data.getVar("MULTI_PROVIDER_WHITELIST", cfgData, 1) or "").split()
        self.scheduler = bb.data.getVar("BB_SCHEDULER", cfgData, 1) or "speed"
        self.stamppolicy = bb.data.getVar("BB_STAMP_POLICY", cfgData, 1) or "perfile"
        self.stampwhitelist = bb.data.getVar("BB_STAMP_WHITELIST", cfgData, 1) or ""
171 | |||
    def reset_runqueue(self):
        """Discard any computed runqueue data and return to the prepare state."""
        self.runq_fnid = []      # per-task file id (index into taskData.fn_index)
        self.runq_task = []      # per-task task name
        self.runq_depends = []   # per-task set of task ids it depends on
        self.runq_revdeps = []   # per-task set of reverse dependencies

        self.state = runQueuePrepare
179 | |||
180 | def get_user_idstring(self, task): | ||
181 | fn = self.taskData.fn_index[self.runq_fnid[task]] | ||
182 | taskname = self.runq_task[task] | ||
183 | return "%s, %s" % (fn, taskname) | ||
184 | |||
185 | def get_task_id(self, fnid, taskname): | ||
186 | for listid in range(len(self.runq_fnid)): | ||
187 | if self.runq_fnid[listid] == fnid and self.runq_task[listid] == taskname: | ||
188 | return listid | ||
189 | return None | ||
190 | |||
    def circular_depchains_handler(self, tasks):
        """
        Some tasks aren't buildable, likely due to circular dependency issues.
        Identify the circular dependencies and print them in a user readable format.

        tasks: list of task ids which failed the buildability check.
        Returns a list of message strings describing up to 10 dependency loops.
        """
        from copy import deepcopy

        valid_chains = []    # loops already reported, in canonical order (see chain_reorder)
        explored_deps = {}   # taskid -> cumulative reverse-dependency list (memoisation)
        msgs = []

        def chain_reorder(chain):
            """
            Reorder a dependency chain so the lowest task id is first
            (gives every loop a canonical form so duplicates can be detected)
            """
            lowest = 0
            new_chain = []
            for entry in range(len(chain)):
                if chain[entry] < chain[lowest]:
                    lowest = entry
            new_chain.extend(chain[lowest:])
            new_chain.extend(chain[:lowest])
            return new_chain

        def chain_compare_equal(chain1, chain2):
            """
            Compare two dependency chains and see if they're the same
            """
            if len(chain1) != len(chain2):
                return False
            for index in range(len(chain1)):
                if chain1[index] != chain2[index]:
                    return False
            return True

        def chain_array_contains(chain, chain_array):
            """
            Return True if chain_array contains chain
            """
            for ch in chain_array:
                if chain_compare_equal(ch, chain):
                    return True
            return False

        def find_chains(taskid, prev_chain):
            # Depth-first walk over runq_revdeps; prev_chain is the path taken
            # to reach taskid. Meeting a task already on the path means a loop.
            prev_chain.append(taskid)
            total_deps = []
            total_deps.extend(self.runq_revdeps[taskid])
            for revdep in self.runq_revdeps[taskid]:
                if revdep in prev_chain:
                    idx = prev_chain.index(revdep)
                    # To prevent duplicates, reorder the chain to start with the lowest taskid
                    # and search through an array of those we've already printed
                    chain = prev_chain[idx:]
                    new_chain = chain_reorder(chain)
                    if not chain_array_contains(new_chain, valid_chains):
                        valid_chains.append(new_chain)
                        msgs.append("Dependency loop #%d found:\n" % len(valid_chains))
                        for dep in new_chain:
                            msgs.append("  Task %s (%s) (depends: %s)\n" % (dep, self.get_user_idstring(dep), self.runq_depends[dep]))
                        msgs.append("\n")
                    if len(valid_chains) > 10:
                        msgs.append("Aborted dependency loops search after 10 matches.\n")
                        return msgs
                    continue
                scan = False
                if revdep not in explored_deps:
                    # Never visited: must be explored.
                    scan = True
                elif revdep in explored_deps[revdep]:
                    # revdep appears in its own closure, i.e. it sits on a loop.
                    scan = True
                else:
                    # Re-scan when any task on the current path shows up in
                    # revdep's already-computed closure.
                    for dep in prev_chain:
                        if dep in explored_deps[revdep]:
                            scan = True
                if scan:
                    find_chains(revdep, deepcopy(prev_chain))
                for dep in explored_deps[revdep]:
                    if dep not in total_deps:
                        total_deps.append(dep)

            explored_deps[taskid] = total_deps

        for task in tasks:
            find_chains(task, [])

        return msgs
277 | |||
    def calculate_task_weights(self, endpoints):
        """
        Calculate a number representing the "weight" of each task. Heavier weighted tasks
        have more dependencies and hence should be executed sooner for maximum speed.

        This function also sanity checks the task list finding tasks that its not
        possible to execute due to circular dependencies.

        endpoints: task ids used to seed the propagation (presumably tasks
        with no reverse dependencies -- TODO confirm against the caller).
        Returns the per-task weight list, or aborts via bb.msg.fatal() when
        unbuildable tasks are found.
        """

        numTasks = len(self.runq_fnid)
        weight = []      # computed weight per task id
        deps_left = []   # reverse dependencies not yet processed per task
        task_done = []   # True once a task's weight is final

        for listid in range(numTasks):
            task_done.append(False)
            weight.append(0)
            deps_left.append(len(self.runq_revdeps[listid]))

        # The endpoints seed the propagation with weight 1.
        for listid in endpoints:
            weight[listid] = 1
            task_done[listid] = True

        # Breadth-first propagation along runq_depends: each dependency
        # accumulates the weight of every task depending on it, and joins the
        # next frontier once all of its reverse dependencies are accounted for.
        while 1:
            next_points = []
            for listid in endpoints:
                for revdep in self.runq_depends[listid]:
                    weight[revdep] = weight[revdep] + weight[listid]
                    deps_left[revdep] = deps_left[revdep] - 1
                    if deps_left[revdep] == 0:
                        next_points.append(revdep)
                        task_done[revdep] = True
            endpoints = next_points
            if len(next_points) == 0:
                break

        # Circular dependency sanity check: any task never marked done, or
        # with dependencies still outstanding, cannot be scheduled.
        problem_tasks = []
        for task in range(numTasks):
            if task_done[task] is False or deps_left[task] != 0:
                problem_tasks.append(task)
                bb.msg.debug(2, bb.msg.domain.RunQueue, "Task %s (%s) is not buildable\n" % (task, self.get_user_idstring(task)))
                bb.msg.debug(2, bb.msg.domain.RunQueue, "(Complete marker was %s and the remaining dependency count was %s)\n\n" % (task_done[task], deps_left[task]))

        if problem_tasks:
            message = "Unbuildable tasks were found.\n"
            message = message + "These are usually caused by circular dependencies and any circular dependency chains found will be printed below. Increase the debug level to see a list of unbuildable tasks.\n\n"
            message = message + "Identifying dependency loops (this may take a short while)...\n"
            bb.msg.error(bb.msg.domain.RunQueue, message)

            msgs = self.circular_depchains_handler(problem_tasks)

            message = "\n"
            for msg in msgs:
                message = message + msg
            # fatal() aborts the build; weight is only returned on success.
            bb.msg.fatal(bb.msg.domain.RunQueue, message)

        return weight
336 | |||
337 | def prepare_runqueue(self): | ||
338 | """ | ||
339 | Turn a set of taskData into a RunQueue and compute data needed | ||
340 | to optimise the execution order. | ||
341 | """ | ||
342 | |||
343 | runq_build = [] | ||
344 | recursive_tdepends = {} | ||
345 | runq_recrdepends = [] | ||
346 | tdepends_fnid = {} | ||
347 | |||
348 | taskData = self.taskData | ||
349 | |||
350 | if len(taskData.tasks_name) == 0: | ||
351 | # Nothing to do | ||
352 | return | ||
353 | |||
354 | bb.msg.note(1, bb.msg.domain.RunQueue, "Preparing runqueue") | ||
355 | |||
356 | # Step A - Work out a list of tasks to run | ||
357 | # | ||
358 | # Taskdata gives us a list of possible providers for every build and run | ||
359 | # target ordered by priority. It also gives information on each of those | ||
360 | # providers. | ||
361 | # | ||
362 | # To create the actual list of tasks to execute we fix the list of | ||
363 | # providers and then resolve the dependencies into task IDs. This | ||
364 | # process is repeated for each type of dependency (tdepends, deptask, | ||
365 | # rdeptast, recrdeptask, idepends). | ||
366 | |||
367 | def add_build_dependencies(depids, tasknames, depends): | ||
368 | for depid in depids: | ||
369 | # Won't be in build_targets if ASSUME_PROVIDED | ||
370 | if depid not in taskData.build_targets: | ||
371 | continue | ||
372 | depdata = taskData.build_targets[depid][0] | ||
373 | if depdata is None: | ||
374 | continue | ||
375 | dep = taskData.fn_index[depdata] | ||
376 | for taskname in tasknames: | ||
377 | taskid = taskData.gettask_id(dep, taskname, False) | ||
378 | if taskid is not None: | ||
379 | depends.append(taskid) | ||
380 | |||
381 | def add_runtime_dependencies(depids, tasknames, depends): | ||
382 | for depid in depids: | ||
383 | if depid not in taskData.run_targets: | ||
384 | continue | ||
385 | depdata = taskData.run_targets[depid][0] | ||
386 | if depdata is None: | ||
387 | continue | ||
388 | dep = taskData.fn_index[depdata] | ||
389 | for taskname in tasknames: | ||
390 | taskid = taskData.gettask_id(dep, taskname, False) | ||
391 | if taskid is not None: | ||
392 | depends.append(taskid) | ||
393 | |||
394 | for task in range(len(taskData.tasks_name)): | ||
395 | depends = [] | ||
396 | recrdepends = [] | ||
397 | fnid = taskData.tasks_fnid[task] | ||
398 | fn = taskData.fn_index[fnid] | ||
399 | task_deps = self.dataCache.task_deps[fn] | ||
400 | |||
401 | bb.msg.debug(2, bb.msg.domain.RunQueue, "Processing %s:%s" %(fn, taskData.tasks_name[task])) | ||
402 | |||
403 | if fnid not in taskData.failed_fnids: | ||
404 | |||
405 | # Resolve task internal dependencies | ||
406 | # | ||
407 | # e.g. addtask before X after Y | ||
408 | depends = taskData.tasks_tdepends[task] | ||
409 | |||
410 | # Resolve 'deptask' dependencies | ||
411 | # | ||
412 | # e.g. do_sometask[deptask] = "do_someothertask" | ||
413 | # (makes sure sometask runs after someothertask of all DEPENDS) | ||
414 | if 'deptask' in task_deps and taskData.tasks_name[task] in task_deps['deptask']: | ||
415 | tasknames = task_deps['deptask'][taskData.tasks_name[task]].split() | ||
416 | add_build_dependencies(taskData.depids[fnid], tasknames, depends) | ||
417 | |||
418 | # Resolve 'rdeptask' dependencies | ||
419 | # | ||
420 | # e.g. do_sometask[rdeptask] = "do_someothertask" | ||
421 | # (makes sure sometask runs after someothertask of all RDEPENDS) | ||
422 | if 'rdeptask' in task_deps and taskData.tasks_name[task] in task_deps['rdeptask']: | ||
423 | taskname = task_deps['rdeptask'][taskData.tasks_name[task]] | ||
424 | add_runtime_dependencies(taskData.rdepids[fnid], [taskname], depends) | ||
425 | |||
426 | # Resolve inter-task dependencies | ||
427 | # | ||
428 | # e.g. do_sometask[depends] = "targetname:do_someothertask" | ||
429 | # (makes sure sometask runs after targetname's someothertask) | ||
430 | if fnid not in tdepends_fnid: | ||
431 | tdepends_fnid[fnid] = set() | ||
432 | idepends = taskData.tasks_idepends[task] | ||
433 | for (depid, idependtask) in idepends: | ||
434 | if depid in taskData.build_targets: | ||
435 | # Won't be in build_targets if ASSUME_PROVIDED | ||
436 | depdata = taskData.build_targets[depid][0] | ||
437 | if depdata is not None: | ||
438 | dep = taskData.fn_index[depdata] | ||
439 | taskid = taskData.gettask_id(dep, idependtask) | ||
440 | depends.append(taskid) | ||
441 | if depdata != fnid: | ||
442 | tdepends_fnid[fnid].add(taskid) | ||
443 | |||
444 | |||
445 | # Resolve recursive 'recrdeptask' dependencies (A) | ||
446 | # | ||
447 | # e.g. do_sometask[recrdeptask] = "do_someothertask" | ||
448 | # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively) | ||
449 | # We cover the recursive part of the dependencies below | ||
450 | if 'recrdeptask' in task_deps and taskData.tasks_name[task] in task_deps['recrdeptask']: | ||
451 | for taskname in task_deps['recrdeptask'][taskData.tasks_name[task]].split(): | ||
452 | recrdepends.append(taskname) | ||
453 | add_build_dependencies(taskData.depids[fnid], [taskname], depends) | ||
454 | add_runtime_dependencies(taskData.rdepids[fnid], [taskname], depends) | ||
455 | |||
456 | # Rmove all self references | ||
457 | if task in depends: | ||
458 | newdep = [] | ||
459 | bb.msg.debug(2, bb.msg.domain.RunQueue, "Task %s (%s %s) contains self reference! %s" % (task, taskData.fn_index[taskData.tasks_fnid[task]], taskData.tasks_name[task], depends)) | ||
460 | for dep in depends: | ||
461 | if task != dep: | ||
462 | newdep.append(dep) | ||
463 | depends = newdep | ||
464 | |||
465 | self.runq_fnid.append(taskData.tasks_fnid[task]) | ||
466 | self.runq_task.append(taskData.tasks_name[task]) | ||
467 | self.runq_depends.append(set(depends)) | ||
468 | self.runq_revdeps.append(set()) | ||
469 | |||
470 | runq_build.append(0) | ||
471 | runq_recrdepends.append(recrdepends) | ||
472 | |||
473 | # | ||
474 | # Build a list of recursive cumulative dependencies for each fnid | ||
475 | # We do this by fnid, since if A depends on some task in B | ||
476 | # we're interested in later tasks B's fnid might have but B itself | ||
477 | # doesn't depend on | ||
478 | # | ||
479 | # Algorithm is O(tasks) + O(tasks)*O(fnids) | ||
480 | # | ||
481 | reccumdepends = {} | ||
482 | for task in range(len(self.runq_fnid)): | ||
483 | fnid = self.runq_fnid[task] | ||
484 | if fnid not in reccumdepends: | ||
485 | if fnid in tdepends_fnid: | ||
486 | reccumdepends[fnid] = tdepends_fnid[fnid] | ||
487 | else: | ||
488 | reccumdepends[fnid] = set() | ||
489 | reccumdepends[fnid].update(self.runq_depends[task]) | ||
490 | for task in range(len(self.runq_fnid)): | ||
491 | taskfnid = self.runq_fnid[task] | ||
492 | for fnid in reccumdepends: | ||
493 | if task in reccumdepends[fnid]: | ||
494 | reccumdepends[fnid].add(task) | ||
495 | if taskfnid in reccumdepends: | ||
496 | reccumdepends[fnid].update(reccumdepends[taskfnid]) | ||
497 | |||
498 | |||
499 | # Resolve recursive 'recrdeptask' dependencies (B) | ||
500 | # | ||
501 | # e.g. do_sometask[recrdeptask] = "do_someothertask" | ||
502 | # (makes sure sometask runs after someothertask of all DEPENDS, RDEPENDS and intertask dependencies, recursively) | ||
503 | for task in range(len(self.runq_fnid)): | ||
504 | if len(runq_recrdepends[task]) > 0: | ||
505 | taskfnid = self.runq_fnid[task] | ||
506 | for dep in reccumdepends[taskfnid]: | ||
507 | # Ignore self references | ||
508 | if dep == task: | ||
509 | continue | ||
510 | for taskname in runq_recrdepends[task]: | ||
511 | if taskData.tasks_name[dep] == taskname: | ||
512 | self.runq_depends[task].add(dep) | ||
513 | |||
514 | # Step B - Mark all active tasks | ||
515 | # | ||
516 | # Start with the tasks we were asked to run and mark all dependencies | ||
517 | # as active too. If the task is to be 'forced', clear its stamp. Once | ||
518 | # all active tasks are marked, prune the ones we don't need. | ||
519 | |||
520 | bb.msg.note(2, bb.msg.domain.RunQueue, "Marking Active Tasks") | ||
521 | |||
522 | def mark_active(listid, depth): | ||
523 | """ | ||
524 | Mark an item as active along with its depends | ||
525 | (calls itself recursively) | ||
526 | """ | ||
527 | |||
528 | if runq_build[listid] == 1: | ||
529 | return | ||
530 | |||
531 | runq_build[listid] = 1 | ||
532 | |||
533 | depends = self.runq_depends[listid] | ||
534 | for depend in depends: | ||
535 | mark_active(depend, depth+1) | ||
536 | |||
537 | self.target_pairs = [] | ||
538 | for target in self.targets: | ||
539 | targetid = taskData.getbuild_id(target[0]) | ||
540 | |||
541 | if targetid not in taskData.build_targets: | ||
542 | continue | ||
543 | |||
544 | if targetid in taskData.failed_deps: | ||
545 | continue | ||
546 | |||
547 | fnid = taskData.build_targets[targetid][0] | ||
548 | fn = taskData.fn_index[fnid] | ||
549 | self.target_pairs.append((fn, target[1])) | ||
550 | |||
551 | # Remove stamps for targets if force mode active | ||
552 | if self.cooker.configuration.force: | ||
553 | bb.msg.note(2, bb.msg.domain.RunQueue, "Remove stamp %s, %s" % (target[1], fn)) | ||
554 | bb.build.del_stamp(target[1], self.dataCache, fn) | ||
555 | |||
556 | if fnid in taskData.failed_fnids: | ||
557 | continue | ||
558 | |||
559 | if target[1] not in taskData.tasks_lookup[fnid]: | ||
560 | bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s does not exist for target %s" % (target[1], target[0])) | ||
561 | |||
562 | listid = taskData.tasks_lookup[fnid][target[1]] | ||
563 | |||
564 | mark_active(listid, 1) | ||
565 | |||
566 | # Step C - Prune all inactive tasks | ||
567 | # | ||
568 | # Once all active tasks are marked, prune the ones we don't need. | ||
569 | |||
570 | maps = [] | ||
571 | delcount = 0 | ||
572 | for listid in range(len(self.runq_fnid)): | ||
573 | if runq_build[listid-delcount] == 1: | ||
574 | maps.append(listid-delcount) | ||
575 | else: | ||
576 | del self.runq_fnid[listid-delcount] | ||
577 | del self.runq_task[listid-delcount] | ||
578 | del self.runq_depends[listid-delcount] | ||
579 | del runq_build[listid-delcount] | ||
580 | del self.runq_revdeps[listid-delcount] | ||
581 | delcount = delcount + 1 | ||
582 | maps.append(-1) | ||
583 | |||
584 | # | ||
585 | # Step D - Sanity checks and computation | ||
586 | # | ||
587 | |||
588 | # Check to make sure we still have tasks to run | ||
589 | if len(self.runq_fnid) == 0: | ||
590 | if not taskData.abort: | ||
591 | bb.msg.fatal(bb.msg.domain.RunQueue, "All buildable tasks have been run but the build is incomplete (--continue mode). Errors for the tasks that failed will have been printed above.") | ||
592 | else: | ||
593 | bb.msg.fatal(bb.msg.domain.RunQueue, "No active tasks and not in --continue mode?! Please report this bug.") | ||
594 | |||
595 | bb.msg.note(2, bb.msg.domain.RunQueue, "Pruned %s inactive tasks, %s left" % (delcount, len(self.runq_fnid))) | ||
596 | |||
597 | # Remap the dependencies to account for the deleted tasks | ||
598 | # Check we didn't delete a task we depend on | ||
599 | for listid in range(len(self.runq_fnid)): | ||
600 | newdeps = [] | ||
601 | origdeps = self.runq_depends[listid] | ||
602 | for origdep in origdeps: | ||
603 | if maps[origdep] == -1: | ||
604 | bb.msg.fatal(bb.msg.domain.RunQueue, "Invalid mapping - Should never happen!") | ||
605 | newdeps.append(maps[origdep]) | ||
606 | self.runq_depends[listid] = set(newdeps) | ||
607 | |||
608 | bb.msg.note(2, bb.msg.domain.RunQueue, "Assign Weightings") | ||
609 | |||
610 | # Generate a list of reverse dependencies to ease future calculations | ||
611 | for listid in range(len(self.runq_fnid)): | ||
612 | for dep in self.runq_depends[listid]: | ||
613 | self.runq_revdeps[dep].add(listid) | ||
614 | |||
615 | # Identify tasks at the end of dependency chains | ||
616 | # Error on circular dependency loops (length two) | ||
617 | endpoints = [] | ||
618 | for listid in range(len(self.runq_fnid)): | ||
619 | revdeps = self.runq_revdeps[listid] | ||
620 | if len(revdeps) == 0: | ||
621 | endpoints.append(listid) | ||
622 | for dep in revdeps: | ||
623 | if dep in self.runq_depends[listid]: | ||
624 | #self.dump_data(taskData) | ||
625 | bb.msg.fatal(bb.msg.domain.RunQueue, "Task %s (%s) has circular dependency on %s (%s)" % (taskData.fn_index[self.runq_fnid[dep]], self.runq_task[dep] , taskData.fn_index[self.runq_fnid[listid]], self.runq_task[listid])) | ||
626 | |||
627 | bb.msg.note(2, bb.msg.domain.RunQueue, "Compute totals (have %s endpoint(s))" % len(endpoints)) | ||
628 | |||
629 | # Calculate task weights | ||
630 | # Check of higher length circular dependencies | ||
631 | self.runq_weight = self.calculate_task_weights(endpoints) | ||
632 | |||
633 | # Decide what order to execute the tasks in, pick a scheduler | ||
634 | #self.sched = RunQueueScheduler(self) | ||
635 | if self.scheduler == "completion": | ||
636 | self.sched = RunQueueSchedulerCompletion(self) | ||
637 | else: | ||
638 | self.sched = RunQueueSchedulerSpeed(self) | ||
639 | |||
640 | # Sanity Check - Check for multiple tasks building the same provider | ||
641 | prov_list = {} | ||
642 | seen_fn = [] | ||
643 | for task in range(len(self.runq_fnid)): | ||
644 | fn = taskData.fn_index[self.runq_fnid[task]] | ||
645 | if fn in seen_fn: | ||
646 | continue | ||
647 | seen_fn.append(fn) | ||
648 | for prov in self.dataCache.fn_provides[fn]: | ||
649 | if prov not in prov_list: | ||
650 | prov_list[prov] = [fn] | ||
651 | elif fn not in prov_list[prov]: | ||
652 | prov_list[prov].append(fn) | ||
653 | error = False | ||
654 | for prov in prov_list: | ||
655 | if len(prov_list[prov]) > 1 and prov not in self.multi_provider_whitelist: | ||
656 | error = True | ||
657 | bb.msg.error(bb.msg.domain.RunQueue, "Multiple .bb files are due to be built which each provide %s (%s).\n This usually means one provides something the other doesn't and should." % (prov, " ".join(prov_list[prov]))) | ||
658 | #if error: | ||
659 | # bb.msg.fatal(bb.msg.domain.RunQueue, "Corrupted metadata configuration detected, aborting...") | ||
660 | |||
661 | |||
662 | # Create a whitelist usable by the stamp checks | ||
663 | stampfnwhitelist = [] | ||
664 | for entry in self.stampwhitelist.split(): | ||
665 | entryid = self.taskData.getbuild_id(entry) | ||
666 | if entryid not in self.taskData.build_targets: | ||
667 | continue | ||
668 | fnid = self.taskData.build_targets[entryid][0] | ||
669 | fn = self.taskData.fn_index[fnid] | ||
670 | stampfnwhitelist.append(fn) | ||
671 | self.stampfnwhitelist = stampfnwhitelist | ||
672 | |||
673 | #self.dump_data(taskData) | ||
674 | |||
675 | self.state = runQueueRunInit | ||
676 | |||
677 | def check_stamps(self): | ||
678 | unchecked = {} | ||
679 | current = [] | ||
680 | notcurrent = [] | ||
681 | buildable = [] | ||
682 | |||
683 | if self.stamppolicy == "perfile": | ||
684 | fulldeptree = False | ||
685 | else: | ||
686 | fulldeptree = True | ||
687 | stampwhitelist = [] | ||
688 | if self.stamppolicy == "whitelist": | ||
689 | stampwhitelist = self.self.stampfnwhitelist | ||
690 | |||
691 | for task in range(len(self.runq_fnid)): | ||
692 | unchecked[task] = "" | ||
693 | if len(self.runq_depends[task]) == 0: | ||
694 | buildable.append(task) | ||
695 | |||
696 | def check_buildable(self, task, buildable): | ||
697 | for revdep in self.runq_revdeps[task]: | ||
698 | alldeps = 1 | ||
699 | for dep in self.runq_depends[revdep]: | ||
700 | if dep in unchecked: | ||
701 | alldeps = 0 | ||
702 | if alldeps == 1: | ||
703 | if revdep in unchecked: | ||
704 | buildable.append(revdep) | ||
705 | |||
706 | for task in range(len(self.runq_fnid)): | ||
707 | if task not in unchecked: | ||
708 | continue | ||
709 | fn = self.taskData.fn_index[self.runq_fnid[task]] | ||
710 | taskname = self.runq_task[task] | ||
711 | stampfile = "%s.%s" % (self.dataCache.stamp[fn], taskname) | ||
712 | # If the stamp is missing its not current | ||
713 | if not os.access(stampfile, os.F_OK): | ||
714 | del unchecked[task] | ||
715 | notcurrent.append(task) | ||
716 | check_buildable(self, task, buildable) | ||
717 | continue | ||
718 | # If its a 'nostamp' task, it's not current | ||
719 | taskdep = self.dataCache.task_deps[fn] | ||
720 | if 'nostamp' in taskdep and task in taskdep['nostamp']: | ||
721 | del unchecked[task] | ||
722 | notcurrent.append(task) | ||
723 | check_buildable(self, task, buildable) | ||
724 | continue | ||
725 | |||
726 | while (len(buildable) > 0): | ||
727 | nextbuildable = [] | ||
728 | for task in buildable: | ||
729 | if task in unchecked: | ||
730 | fn = self.taskData.fn_index[self.runq_fnid[task]] | ||
731 | taskname = self.runq_task[task] | ||
732 | stampfile = "%s.%s" % (self.dataCache.stamp[fn], taskname) | ||
733 | iscurrent = True | ||
734 | |||
735 | t1 = os.stat(stampfile)[stat.ST_MTIME] | ||
736 | for dep in self.runq_depends[task]: | ||
737 | if iscurrent: | ||
738 | fn2 = self.taskData.fn_index[self.runq_fnid[dep]] | ||
739 | taskname2 = self.runq_task[dep] | ||
740 | stampfile2 = "%s.%s" % (self.dataCache.stamp[fn2], taskname2) | ||
741 | if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist): | ||
742 | if dep in notcurrent: | ||
743 | iscurrent = False | ||
744 | else: | ||
745 | t2 = os.stat(stampfile2)[stat.ST_MTIME] | ||
746 | if t1 < t2: | ||
747 | iscurrent = False | ||
748 | del unchecked[task] | ||
749 | if iscurrent: | ||
750 | current.append(task) | ||
751 | else: | ||
752 | notcurrent.append(task) | ||
753 | |||
754 | check_buildable(self, task, nextbuildable) | ||
755 | |||
756 | buildable = nextbuildable | ||
757 | |||
758 | #for task in range(len(self.runq_fnid)): | ||
759 | # fn = self.taskData.fn_index[self.runq_fnid[task]] | ||
760 | # taskname = self.runq_task[task] | ||
761 | # print "%s %s.%s" % (task, taskname, fn) | ||
762 | |||
763 | #print "Unchecked: %s" % unchecked | ||
764 | #print "Current: %s" % current | ||
765 | #print "Not current: %s" % notcurrent | ||
766 | |||
767 | if len(unchecked) > 0: | ||
768 | bb.fatal("check_stamps fatal internal error") | ||
769 | return current | ||
770 | |||
771 | def check_stamp_task(self, task): | ||
772 | |||
773 | if self.stamppolicy == "perfile": | ||
774 | fulldeptree = False | ||
775 | else: | ||
776 | fulldeptree = True | ||
777 | stampwhitelist = [] | ||
778 | if self.stamppolicy == "whitelist": | ||
779 | stampwhitelist = self.stampfnwhitelist | ||
780 | |||
781 | fn = self.taskData.fn_index[self.runq_fnid[task]] | ||
782 | taskname = self.runq_task[task] | ||
783 | stampfile = "%s.%s" % (self.dataCache.stamp[fn], taskname) | ||
784 | # If the stamp is missing its not current | ||
785 | if not os.access(stampfile, os.F_OK): | ||
786 | bb.msg.debug(2, bb.msg.domain.RunQueue, "Stampfile %s not available\n" % stampfile) | ||
787 | return False | ||
788 | # If its a 'nostamp' task, it's not current | ||
789 | taskdep = self.dataCache.task_deps[fn] | ||
790 | if 'nostamp' in taskdep and taskname in taskdep['nostamp']: | ||
791 | bb.msg.debug(2, bb.msg.domain.RunQueue, "%s.%s is nostamp\n" % (fn, taskname)) | ||
792 | return False | ||
793 | |||
794 | iscurrent = True | ||
795 | t1 = os.stat(stampfile)[stat.ST_MTIME] | ||
796 | for dep in self.runq_depends[task]: | ||
797 | if iscurrent: | ||
798 | fn2 = self.taskData.fn_index[self.runq_fnid[dep]] | ||
799 | taskname2 = self.runq_task[dep] | ||
800 | stampfile2 = "%s.%s" % (self.dataCache.stamp[fn2], taskname2) | ||
801 | if fn == fn2 or (fulldeptree and fn2 not in stampwhitelist): | ||
802 | try: | ||
803 | t2 = os.stat(stampfile2)[stat.ST_MTIME] | ||
804 | if t1 < t2: | ||
805 | bb.msg.debug(2, bb.msg.domain.RunQueue, "Stampfile %s < %s" % (stampfile,stampfile2)) | ||
806 | iscurrent = False | ||
807 | except: | ||
808 | bb.msg.debug(2, bb.msg.domain.RunQueue, "Exception reading %s for %s" % (stampfile2 ,stampfile)) | ||
809 | iscurrent = False | ||
810 | |||
811 | return iscurrent | ||
812 | |||
    def execute_runqueue(self):
        """
        Run the tasks in a queue prepared by prepare_runqueue
        Upon failure, optionally try to recover the build using any alternate providers
        (if the abort on failure configuration option isn't set)

        Acts as the state-machine driver: each call advances through as
        many states as it can.  Returns True while the caller should keep
        calling, False once the queue is finished (or we are a forked
        child that fell back out of the run loop).
        """

        # Deliberately sequential 'if's, not 'elif': a phase can advance
        # self.state so several phases may execute within a single call.
        if self.state is runQueuePrepare:
            self.prepare_runqueue()

        if self.state is runQueueRunInit:
            bb.msg.note(1, bb.msg.domain.RunQueue, "Executing runqueue")
            self.execute_runqueue_initVars()

        if self.state is runQueueRunning:
            self.execute_runqueue_internal()

        if self.state is runQueueCleanUp:
            self.finish_runqueue()

        if self.state is runQueueFailed:
            # Give up, or mark the failed providers and retry with any
            # alternate providers (tryaltconfigs behaviour)
            if not self.taskData.tryaltconfigs:
                raise bb.runqueue.TaskFailure(self.failed_fnids)
            for fnid in self.failed_fnids:
                self.taskData.fail_fnid(fnid)
            self.reset_runqueue()

        if self.state is runQueueComplete:
            # All done
            bb.msg.note(1, bb.msg.domain.RunQueue, "Tasks Summary: Attempted %d tasks of which %d didn't need to be rerun and %d failed." % (self.stats.completed, self.stats.skipped, self.stats.failed))
            return False

        if self.state is runQueueChildProcess:
            # A forked task worker escaped back here; stop looping
            print "Child process"
            return False

        # Loop
        return True
851 | |||
852 | def execute_runqueue_initVars(self): | ||
853 | |||
854 | self.stats = RunQueueStats(len(self.runq_fnid)) | ||
855 | |||
856 | self.runq_buildable = [] | ||
857 | self.runq_running = [] | ||
858 | self.runq_complete = [] | ||
859 | self.build_pids = {} | ||
860 | self.build_pipes = {} | ||
861 | self.failed_fnids = [] | ||
862 | |||
863 | # Mark initial buildable tasks | ||
864 | for task in range(self.stats.total): | ||
865 | self.runq_running.append(0) | ||
866 | self.runq_complete.append(0) | ||
867 | if len(self.runq_depends[task]) == 0: | ||
868 | self.runq_buildable.append(1) | ||
869 | else: | ||
870 | self.runq_buildable.append(0) | ||
871 | |||
872 | self.state = runQueueRunning | ||
873 | |||
874 | event.fire(bb.event.StampUpdate(self.target_pairs, self.dataCache.stamp), self.cfgData) | ||
875 | |||
876 | def task_complete(self, task): | ||
877 | """ | ||
878 | Mark a task as completed | ||
879 | Look at the reverse dependencies and mark any task with | ||
880 | completed dependencies as buildable | ||
881 | """ | ||
882 | self.runq_complete[task] = 1 | ||
883 | for revdep in self.runq_revdeps[task]: | ||
884 | if self.runq_running[revdep] == 1: | ||
885 | continue | ||
886 | if self.runq_buildable[revdep] == 1: | ||
887 | continue | ||
888 | alldeps = 1 | ||
889 | for dep in self.runq_depends[revdep]: | ||
890 | if self.runq_complete[dep] != 1: | ||
891 | alldeps = 0 | ||
892 | if alldeps == 1: | ||
893 | self.runq_buildable[revdep] = 1 | ||
894 | fn = self.taskData.fn_index[self.runq_fnid[revdep]] | ||
895 | taskname = self.runq_task[revdep] | ||
896 | bb.msg.debug(1, bb.msg.domain.RunQueue, "Marking task %s (%s, %s) as buildable" % (revdep, fn, taskname)) | ||
897 | |||
898 | def task_fail(self, task, exitcode): | ||
899 | """ | ||
900 | Called when a task has failed | ||
901 | Updates the state engine with the failure | ||
902 | """ | ||
903 | bb.msg.error(bb.msg.domain.RunQueue, "Task %s (%s) failed with %s" % (task, self.get_user_idstring(task), exitcode)) | ||
904 | self.stats.taskFailed() | ||
905 | fnid = self.runq_fnid[task] | ||
906 | self.failed_fnids.append(fnid) | ||
907 | bb.event.fire(runQueueTaskFailed(task, self.stats, self), self.cfgData) | ||
908 | if self.taskData.abort: | ||
909 | self.state = runQueueCleanup | ||
910 | |||
911 | def execute_runqueue_internal(self): | ||
912 | """ | ||
913 | Run the tasks in a queue prepared by prepare_runqueue | ||
914 | """ | ||
915 | |||
916 | if self.stats.total == 0: | ||
917 | # nothing to do | ||
918 | self.state = runQueueCleanup | ||
919 | |||
920 | while True: | ||
921 | task = None | ||
922 | if self.stats.active < self.number_tasks: | ||
923 | task = self.sched.next() | ||
924 | if task is not None: | ||
925 | fn = self.taskData.fn_index[self.runq_fnid[task]] | ||
926 | |||
927 | taskname = self.runq_task[task] | ||
928 | if self.check_stamp_task(task): | ||
929 | bb.msg.debug(2, bb.msg.domain.RunQueue, "Stamp current task %s (%s)" % (task, self.get_user_idstring(task))) | ||
930 | self.runq_running[task] = 1 | ||
931 | self.runq_buildable[task] = 1 | ||
932 | self.task_complete(task) | ||
933 | self.stats.taskCompleted() | ||
934 | self.stats.taskSkipped() | ||
935 | continue | ||
936 | |||
937 | sys.stdout.flush() | ||
938 | sys.stderr.flush() | ||
939 | try: | ||
940 | pipein, pipeout = os.pipe() | ||
941 | pid = os.fork() | ||
942 | except OSError, e: | ||
943 | bb.msg.fatal(bb.msg.domain.RunQueue, "fork failed: %d (%s)" % (e.errno, e.strerror)) | ||
944 | if pid == 0: | ||
945 | os.close(pipein) | ||
946 | # Save out the PID so that the event can include it the | ||
947 | # events | ||
948 | bb.event.worker_pid = os.getpid() | ||
949 | bb.event.worker_pipe = pipeout | ||
950 | |||
951 | self.state = runQueueChildProcess | ||
952 | # Make the child the process group leader | ||
953 | os.setpgid(0, 0) | ||
954 | # No stdin | ||
955 | newsi = os.open('/dev/null', os.O_RDWR) | ||
956 | os.dup2(newsi, sys.stdin.fileno()) | ||
957 | |||
958 | bb.event.fire(runQueueTaskStarted(task, self.stats, self), self.cfgData) | ||
959 | bb.msg.note(1, bb.msg.domain.RunQueue, | ||
960 | "Running task %d of %d (ID: %s, %s)" % (self.stats.completed + self.stats.active + 1, | ||
961 | self.stats.total, | ||
962 | task, | ||
963 | self.get_user_idstring(task))) | ||
964 | |||
965 | bb.data.setVar("__RUNQUEUE_DO_NOT_USE_EXTERNALLY", self, self.cooker.configuration.data) | ||
966 | try: | ||
967 | self.cooker.tryBuild(fn, taskname[3:]) | ||
968 | except bb.build.EventException: | ||
969 | bb.msg.error(bb.msg.domain.Build, "Build of " + fn + " " + taskname + " failed") | ||
970 | os._exit(1) | ||
971 | except: | ||
972 | bb.msg.error(bb.msg.domain.Build, "Build of " + fn + " " + taskname + " failed") | ||
973 | os._exit(1) | ||
974 | os._exit(0) | ||
975 | |||
976 | self.build_pids[pid] = task | ||
977 | self.build_pipes[pid] = runQueuePipe(pipein, pipeout, self.cfgData) | ||
978 | self.runq_running[task] = 1 | ||
979 | self.stats.taskActive() | ||
980 | if self.stats.active < self.number_tasks: | ||
981 | continue | ||
982 | |||
983 | for pipe in self.build_pipes: | ||
984 | self.build_pipes[pipe].read() | ||
985 | |||
986 | if self.stats.active > 0: | ||
987 | result = os.waitpid(-1, os.WNOHANG) | ||
988 | if result[0] is 0 and result[1] is 0: | ||
989 | return | ||
990 | task = self.build_pids[result[0]] | ||
991 | del self.build_pids[result[0]] | ||
992 | self.build_pipes[result[0]].close() | ||
993 | del self.build_pipes[result[0]] | ||
994 | if result[1] != 0: | ||
995 | self.task_fail(task, result[1]) | ||
996 | return | ||
997 | self.task_complete(task) | ||
998 | self.stats.taskCompleted() | ||
999 | bb.event.fire(runQueueTaskCompleted(task, self.stats, self), self.cfgData) | ||
1000 | continue | ||
1001 | |||
1002 | if len(self.failed_fnids) != 0: | ||
1003 | self.state = runQueueFailed | ||
1004 | return | ||
1005 | |||
1006 | # Sanity Checks | ||
1007 | for task in range(self.stats.total): | ||
1008 | if self.runq_buildable[task] == 0: | ||
1009 | bb.msg.error(bb.msg.domain.RunQueue, "Task %s never buildable!" % task) | ||
1010 | if self.runq_running[task] == 0: | ||
1011 | bb.msg.error(bb.msg.domain.RunQueue, "Task %s never ran!" % task) | ||
1012 | if self.runq_complete[task] == 0: | ||
1013 | bb.msg.error(bb.msg.domain.RunQueue, "Task %s never completed!" % task) | ||
1014 | self.state = runQueueComplete | ||
1015 | return | ||
1016 | |||
1017 | def finish_runqueue_now(self): | ||
1018 | bb.msg.note(1, bb.msg.domain.RunQueue, "Sending SIGINT to remaining %s tasks" % self.stats.active) | ||
1019 | for k, v in self.build_pids.iteritems(): | ||
1020 | try: | ||
1021 | os.kill(-k, signal.SIGINT) | ||
1022 | except: | ||
1023 | pass | ||
1024 | for pipe in self.build_pipes: | ||
1025 | self.build_pipes[pipe].read() | ||
1026 | |||
1027 | def finish_runqueue(self, now = False): | ||
1028 | self.state = runQueueCleanUp | ||
1029 | if now: | ||
1030 | self.finish_runqueue_now() | ||
1031 | try: | ||
1032 | while self.stats.active > 0: | ||
1033 | bb.event.fire(runQueueExitWait(self.stats.active), self.cfgData) | ||
1034 | bb.msg.note(1, bb.msg.domain.RunQueue, "Waiting for %s active tasks to finish" % self.stats.active) | ||
1035 | tasknum = 1 | ||
1036 | for k, v in self.build_pids.iteritems(): | ||
1037 | bb.msg.note(1, bb.msg.domain.RunQueue, "%s: %s (%s)" % (tasknum, self.get_user_idstring(v), k)) | ||
1038 | tasknum = tasknum + 1 | ||
1039 | result = os.waitpid(-1, os.WNOHANG) | ||
1040 | if result[0] is 0 and result[1] is 0: | ||
1041 | return | ||
1042 | task = self.build_pids[result[0]] | ||
1043 | del self.build_pids[result[0]] | ||
1044 | self.build_pipes[result[0]].close() | ||
1045 | del self.build_pipes[result[0]] | ||
1046 | if result[1] != 0: | ||
1047 | self.task_fail(task, result[1]) | ||
1048 | else: | ||
1049 | self.stats.taskCompleted() | ||
1050 | bb.event.fire(runQueueTaskCompleted(task, self.stats, self), self.cfgData) | ||
1051 | except: | ||
1052 | self.finish_runqueue_now() | ||
1053 | raise | ||
1054 | |||
1055 | if len(self.failed_fnids) != 0: | ||
1056 | self.state = runQueueFailed | ||
1057 | return | ||
1058 | |||
1059 | self.state = runQueueComplete | ||
1060 | return | ||
1061 | |||
1062 | def dump_data(self, taskQueue): | ||
1063 | """ | ||
1064 | Dump some debug information on the internal data structures | ||
1065 | """ | ||
1066 | bb.msg.debug(3, bb.msg.domain.RunQueue, "run_tasks:") | ||
1067 | for task in range(len(self.runq_task)): | ||
1068 | bb.msg.debug(3, bb.msg.domain.RunQueue, " (%s)%s - %s: %s Deps %s RevDeps %s" % (task, | ||
1069 | taskQueue.fn_index[self.runq_fnid[task]], | ||
1070 | self.runq_task[task], | ||
1071 | self.runq_weight[task], | ||
1072 | self.runq_depends[task], | ||
1073 | self.runq_revdeps[task])) | ||
1074 | |||
1075 | bb.msg.debug(3, bb.msg.domain.RunQueue, "sorted_tasks:") | ||
1076 | for task1 in range(len(self.runq_task)): | ||
1077 | if task1 in self.prio_map: | ||
1078 | task = self.prio_map[task1] | ||
1079 | bb.msg.debug(3, bb.msg.domain.RunQueue, " (%s)%s - %s: %s Deps %s RevDeps %s" % (task, | ||
1080 | taskQueue.fn_index[self.runq_fnid[task]], | ||
1081 | self.runq_task[task], | ||
1082 | self.runq_weight[task], | ||
1083 | self.runq_depends[task], | ||
1084 | self.runq_revdeps[task])) | ||
1085 | |||
1086 | |||
class TaskFailure(Exception):
    """
    Exception raised when a task in a runqueue fails
    """
    def __init__(self, x):
        # Store the failed items on the standard Exception 'args'
        # attribute so callers can retrieve them from the exception.
        # NOTE(review): presumably x is the list of failed fnids
        # (see execute_runqueue raising TaskFailure(self.failed_fnids))
        self.args = x
1093 | |||
1094 | |||
class runQueueExitWait(bb.event.Event):
    """
    Event when waiting for task processes to exit
    """

    def __init__(self, remain):
        # 'remain' is the number of task processes still running
        self.message = "Waiting for %s active tasks to finish" % remain
        self.remain = remain
        bb.event.Event.__init__(self)
1104 | |||
class runQueueEvent(bb.event.Event):
    """
    Base runQueue event class
    """
    def __init__(self, task, stats, rq):
        # Cache the task id, a human-readable identifier and the stats
        # snapshot so UIs can render the event without the runqueue
        self.taskid = task
        self.taskstring = rq.get_user_idstring(task)
        self.stats = stats
        bb.event.Event.__init__(self)
1114 | |||
class runQueueTaskStarted(runQueueEvent):
    """
    Event notifying that a task was started
    """
    def __init__(self, task, stats, rq):
        runQueueEvent.__init__(self, task, stats, rq)
        # completed + active + 1 is this task's ordinal position
        self.message = "Running task %s (%d of %d) (%s)" % (self.taskid, stats.completed + stats.active + 1, stats.total, self.taskstring)
1122 | |||
class runQueueTaskFailed(runQueueEvent):
    """
    Event notifying that a task failed
    """
    def __init__(self, task, stats, rq):
        runQueueEvent.__init__(self, task, stats, rq)
        self.message = "Task %s failed (%s)" % (self.taskid, self.taskstring)
1130 | |||
class runQueueTaskCompleted(runQueueEvent):
    """
    Event notifying that a task completed
    """
    def __init__(self, task, stats, rq):
        runQueueEvent.__init__(self, task, stats, rq)
        self.message = "Task %s completed (%s)" % (self.taskid, self.taskstring)
1138 | |||
def check_stamp_fn(fn, taskname, d):
    """
    Ask the active runqueue (stashed in the datastore) whether the stamp
    for fn:taskname is current.  Returns None if the task is unknown.
    """
    rq = bb.data.getVar("__RUNQUEUE_DO_NOT_USE_EXTERNALLY", d)
    taskid = rq.get_task_id(rq.taskData.getfn_id(fn), taskname)
    if taskid is None:
        return None
    return rq.check_stamp_task(taskid)
1146 | |||
1147 | class runQueuePipe(): | ||
1148 | """ | ||
1149 | Abstraction for a pipe between a worker thread and the server | ||
1150 | """ | ||
1151 | def __init__(self, pipein, pipeout, d): | ||
1152 | self.fd = pipein | ||
1153 | os.close(pipeout) | ||
1154 | self.queue = "" | ||
1155 | self.d = d | ||
1156 | |||
1157 | def read(self): | ||
1158 | start = len(self.queue) | ||
1159 | self.queue = self.queue + os.read(self.fd, 1024) | ||
1160 | end = len(self.queue) | ||
1161 | index = self.queue.find("</event>") | ||
1162 | while index != -1: | ||
1163 | bb.event.fire_from_worker(self.queue[:index+8], self.d) | ||
1164 | self.queue = self.queue[index+8:] | ||
1165 | index = self.queue.find("</event>") | ||
1166 | return (end > start) | ||
1167 | |||
1168 | def close(self): | ||
1169 | while self.read(): | ||
1170 | continue | ||
1171 | if len(self.queue) > 0: | ||
1172 | print "Warning, worker left partial message" | ||
1173 | os.close(self.fd) | ||
1174 | |||
diff --git a/bitbake-dev/lib/bb/server/__init__.py b/bitbake-dev/lib/bb/server/__init__.py deleted file mode 100644 index 1a732236e2..0000000000 --- a/bitbake-dev/lib/bb/server/__init__.py +++ /dev/null | |||
@@ -1,2 +0,0 @@ | |||
1 | import xmlrpc | ||
2 | import none | ||
diff --git a/bitbake-dev/lib/bb/server/none.py b/bitbake-dev/lib/bb/server/none.py deleted file mode 100644 index ebda111582..0000000000 --- a/bitbake-dev/lib/bb/server/none.py +++ /dev/null | |||
@@ -1,181 +0,0 @@ | |||
1 | # | ||
2 | # BitBake 'dummy' Passthrough Server | ||
3 | # | ||
4 | # Copyright (C) 2006 - 2007 Michael 'Mickey' Lauer | ||
5 | # Copyright (C) 2006 - 2008 Richard Purdie | ||
6 | # | ||
7 | # This program is free software; you can redistribute it and/or modify | ||
8 | # it under the terms of the GNU General Public License version 2 as | ||
9 | # published by the Free Software Foundation. | ||
10 | # | ||
11 | # This program is distributed in the hope that it will be useful, | ||
12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | # GNU General Public License for more details. | ||
15 | # | ||
16 | # You should have received a copy of the GNU General Public License along | ||
17 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
18 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
19 | |||
20 | """ | ||
This module implements the 'none' (in-process passthrough) server for
BitBake. Unlike the xmlrpc server there is no separate process or
socket: commands are invoked directly on the cooker and UI events are
exchanged through an in-memory queue (see BBUIEventQueue below).

Use register_idle_function() to add a function which the server
calls when no requests are pending. Make sure that those functions
are non-blocking or else you will introduce latency when the main
loop runs the idle work.
32 | """ | ||
33 | |||
34 | import time | ||
35 | import bb | ||
36 | from bb.ui import uievent | ||
37 | import xmlrpclib | ||
38 | import pickle | ||
39 | |||
40 | DEBUG = False | ||
41 | |||
42 | from SimpleXMLRPCServer import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler | ||
43 | import inspect, select | ||
44 | |||
class BitBakeServerCommands():
    """Command interface exposed by the in-process passthrough server."""

    def __init__(self, server, cooker):
        self.server = server
        self.cooker = cooker

    def runCommand(self, command):
        """Run a cooker command on the server"""
        return self.cooker.command.runCommand(command)

    def terminateServer(self):
        """Trigger the server to quit"""
        self.server.server_exit()
        return

    def ping(self):
        """Dummy method which can be used to check the server is still alive"""
        return True
70 | |||
# Shared, module-level queue of UI events; the static
# BBUIEventQueue.event.send hook below appends to it.
eventQueue = []
72 | |||
class BBUIEventQueue:
    """In-process UI event queue for the passthrough server."""

    class event:
        """Event-pipe stand-in; send() feeds the shared module queue."""
        def __init__(self, parent):
            self.parent = parent

        @staticmethod
        def send(event):
            # Events arrive pickled from the worker side.
            bb.server.none.eventQueue.append(pickle.loads(event))

        @staticmethod
        def quit():
            return

    def __init__(self, BBServer):
        self.eventQueue = bb.server.none.eventQueue
        self.BBServer = BBServer
        self.EventHandle = bb.event.register_UIHhandler(self)

    def getEvent(self):
        """Pop and return the oldest queued event, or None when empty."""
        if not self.eventQueue:
            return None
        return self.eventQueue.pop(0)

    def waitEvent(self, delay):
        """Return an event, running server idle work for *delay* if none is queued."""
        pending = self.getEvent()
        if pending:
            return pending
        self.BBServer.idle_commands(delay)
        return self.getEvent()

    def queue_event(self, event):
        self.eventQueue.append(event)

    def system_quit( self ):
        bb.event.unregister_UIHhandler(self.EventHandle)
107 | |||
class BitBakeServer():
    """In-process 'server': commands run directly against the cooker and
    idle callbacks are executed on demand (no socket, no extra process)."""
    # remove this when you're done with debugging
    # allow_reuse_address = True

    def __init__(self, cooker):
        # function -> opaque data handed back on every idle call
        self._idlefuns = {}
        self.commands = BitBakeServerCommands(self, cooker)

    def register_idle_function(self, function, data):
        """Register a function to be called while the server is idle"""
        assert callable(function)
        self._idlefuns[function] = data

    def idle_commands(self, delay):
        """Run one round of idle callbacks, then sleep up to *delay* seconds.

        Callback protocol: return False to be unregistered, True to request
        an immediate re-run (no sleep at all), or a number of seconds to
        sleep before the next round.
        """
        nextsleep = delay
        # Iterate over a snapshot: callbacks returning False are deleted
        # from the dict while we walk it (unsafe on a live view).
        for function, data in list(self._idlefuns.items()):
            try:
                retval = function(self, data, False)
                if retval is False:
                    del self._idlefuns[function]
                elif retval is True:
                    nextsleep = None
                elif nextsleep is None:
                    continue
                elif retval < nextsleep:
                    nextsleep = retval
            except SystemExit:
                raise
            except:
                # An idle function must not kill the server; report and go on.
                import traceback
                traceback.print_exc()
        if nextsleep is not None:
            time.sleep(nextsleep)

    def server_exit(self):
        """Give every idle function a final call with abort=True."""
        # Tell idle functions we're exiting
        for function, data in list(self._idlefuns.items()):
            try:
                function(self, data, True)
            except:
                pass
155 | |||
class BitbakeServerInfo():
    """Connection details for the passthrough server: the server object
    itself plus its command interface (there is no socket to describe)."""

    def __init__(self, server):
        self.server = server
        self.commands = server.commands
160 | |||
class BitBakeServerFork():
    """No-process 'fork': records the command and logfile on serverinfo
    instead of daemonizing (the passthrough server runs in-process)."""

    def __init__(self, serverinfo, command, logfile):
        serverinfo.logfile = logfile
        serverinfo.forkCommand = command
165 | |||
class BitBakeServerConnection():
    """Client-side handle for the passthrough server: commands are invoked
    directly and events flow through an in-process BBUIEventQueue."""
    def __init__(self, serverinfo):
        self.server = serverinfo.server
        self.connection = serverinfo.commands
        self.events = bb.server.none.BBUIEventQueue(self.server)

    def terminate(self):
        # Best-effort shutdown: ignore any errors from either step.
        try:
            self.events.system_quit()
        except:
            pass
        try:
            self.connection.terminateServer()
        except:
            pass
181 | |||
diff --git a/bitbake-dev/lib/bb/server/xmlrpc.py b/bitbake-dev/lib/bb/server/xmlrpc.py deleted file mode 100644 index 3364918c77..0000000000 --- a/bitbake-dev/lib/bb/server/xmlrpc.py +++ /dev/null | |||
@@ -1,187 +0,0 @@ | |||
1 | # | ||
2 | # BitBake XMLRPC Server | ||
3 | # | ||
4 | # Copyright (C) 2006 - 2007 Michael 'Mickey' Lauer | ||
5 | # Copyright (C) 2006 - 2008 Richard Purdie | ||
6 | # | ||
7 | # This program is free software; you can redistribute it and/or modify | ||
8 | # it under the terms of the GNU General Public License version 2 as | ||
9 | # published by the Free Software Foundation. | ||
10 | # | ||
11 | # This program is distributed in the hope that it will be useful, | ||
12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | # GNU General Public License for more details. | ||
15 | # | ||
16 | # You should have received a copy of the GNU General Public License along | ||
17 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
18 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
19 | |||
20 | """ | ||
21 | This module implements an xmlrpc server for BitBake. | ||
22 | |||
23 | Use this by deriving a class from BitBakeXMLRPCServer and then adding | ||
24 | methods which you want to "export" via XMLRPC. If the methods have the | ||
25 | prefix xmlrpc_, then registering those function will happen automatically, | ||
26 | if not, you need to call register_function. | ||
27 | |||
28 | Use register_idle_function() to add a function which the xmlrpc server | ||
29 | calls from within server_forever when no requests are pending. Make sure | ||
30 | that those functions are non-blocking or else you will introduce latency | ||
31 | in the server's main loop. | ||
32 | """ | ||
33 | |||
34 | import bb | ||
35 | import xmlrpclib, sys | ||
36 | from bb import daemonize | ||
37 | from bb.ui import uievent | ||
38 | |||
39 | DEBUG = False | ||
40 | |||
41 | from SimpleXMLRPCServer import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler | ||
42 | import inspect, select | ||
43 | |||
44 | if sys.hexversion < 0x020600F0: | ||
45 | print "Sorry, python 2.6 or later is required for bitbake's XMLRPC mode" | ||
46 | sys.exit(1) | ||
47 | |||
class BitBakeServerCommands():
    # Methods exported over XMLRPC by BitBakeServer.autoregister_all_functions.
    def __init__(self, server, cooker):
        self.cooker = cooker
        self.server = server

    def registerEventHandler(self, host, port):
        """
        Register a remote UI Event Handler
        """
        # Connect back to the UI's own XMLRPC event server.
        s = xmlrpclib.Server("http://%s:%d" % (host, port), allow_none=True)
        return bb.event.register_UIHhandler(s)

    def unregisterEventHandler(self, handlerNum):
        """
        Unregister a remote UI Event Handler
        """
        return bb.event.unregister_UIHhandler(handlerNum)

    def runCommand(self, command):
        """
        Run a cooker command on the server
        """
        return self.cooker.command.runCommand(command)

    def terminateServer(self):
        """
        Trigger the server to quit
        """
        # serve_forever() checks this flag on each loop iteration.
        self.server.quit = True
        print "Server (cooker) exitting"
        return

    def ping(self):
        """
        Dummy method which can be used to check the server is still alive
        """
        return True
85 | |||
class BitBakeServer(SimpleXMLRPCServer):
    # remove this when you're done with debugging
    # allow_reuse_address = True

    def __init__(self, cooker, interface = ("localhost", 0)):
        """
        Constructor
        """
        SimpleXMLRPCServer.__init__(self, interface,
                                    requestHandler=SimpleXMLRPCRequestHandler,
                                    logRequests=False, allow_none=True)
        self._idlefuns = {}
        # Record the bound address: port 0 asks the OS for a free port.
        self.host, self.port = self.socket.getsockname()
        #self.register_introspection_functions()
        commands = BitBakeServerCommands(self, cooker)
        self.autoregister_all_functions(commands, "")

    def autoregister_all_functions(self, context, prefix):
        """
        Convenience method for registering all functions in the scope
        of this class that start with a common prefix
        """
        methodlist = inspect.getmembers(context, inspect.ismethod)
        for name, method in methodlist:
            if name.startswith(prefix):
                self.register_function(method, name[len(prefix):])

    def register_idle_function(self, function, data):
        """Register a function to be called while the server is idle"""
        assert callable(function)
        self._idlefuns[function] = data

    def serve_forever(self):
        """
        Serve Requests. Overloaded to honor a quit command

        Idle-callback protocol: return False to be unregistered, True for
        an immediate re-run, or a number of seconds until the next run.
        """
        self.quit = False
        self.timeout = 0 # Run Idle calls for our first callback
        while not self.quit:
            self.handle_request()
            nextsleep = None
            # Iterate over a snapshot: callbacks returning False are
            # deleted from the dict while we walk it.
            for function, data in list(self._idlefuns.items()):
                try:
                    retval = function(self, data, False)
                    if retval is False:
                        del self._idlefuns[function]
                    elif retval is True:
                        nextsleep = 0
                    # '== 0' (was 'is 0'): identity on ints relies on
                    # CPython small-int interning; equality is the intent.
                    elif nextsleep == 0:
                        continue
                    elif nextsleep is None:
                        nextsleep = retval
                    elif retval < nextsleep:
                        nextsleep = retval
                except SystemExit:
                    raise
                except:
                    # An idle function must not kill the server loop.
                    import traceback
                    traceback.print_exc()
            if nextsleep is None and len(self._idlefuns) > 0:
                nextsleep = 0
            self.timeout = nextsleep
        # Tell idle functions we're exiting
        for function, data in list(self._idlefuns.items()):
            try:
                function(self, data, True)
            except:
                pass

        self.server_close()
        return
160 | |||
class BitbakeServerInfo():
    """Host/port pair a client needs to reach the XMLRPC server."""

    def __init__(self, server):
        # Copy the values instead of keeping the server object alive.
        self.port = server.port
        self.host = server.host
165 | |||
class BitBakeServerFork():
    # Daemonize the server: detach and run *command*, sending output to
    # *logfile* (bb.daemonize.createDaemon). serverinfo is unused here;
    # the parameter keeps interface parity with the 'none' server.
    def __init__(self, serverinfo, command, logfile):
        daemonize.createDaemon(command, logfile)
169 | |||
class BitBakeServerConnection():
    """Client-side XMLRPC connection to a running BitBake server."""
    def __init__(self, serverinfo):
        # allow_none lets None values cross the XMLRPC boundary (the
        # server side is created with the same option).
        self.connection = xmlrpclib.Server("http://%s:%s" % (serverinfo.host, serverinfo.port), allow_none=True)
        self.events = uievent.BBUIEventQueue(self.connection)

    def terminate(self):
        # Don't wait for server indefinitely
        import socket
        socket.setdefaulttimeout(2)
        try:
            self.events.system_quit()
        except:
            pass
        try:
            self.connection.terminateServer()
        except:
            pass
187 | |||
diff --git a/bitbake-dev/lib/bb/shell.py b/bitbake-dev/lib/bb/shell.py deleted file mode 100644 index 66e51719a4..0000000000 --- a/bitbake-dev/lib/bb/shell.py +++ /dev/null | |||
@@ -1,824 +0,0 @@ | |||
1 | # ex:ts=4:sw=4:sts=4:et | ||
2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
3 | ########################################################################## | ||
4 | # | ||
5 | # Copyright (C) 2005-2006 Michael 'Mickey' Lauer <mickey@Vanille.de> | ||
6 | # Copyright (C) 2005-2006 Vanille Media | ||
7 | # | ||
8 | # This program is free software; you can redistribute it and/or modify | ||
9 | # it under the terms of the GNU General Public License version 2 as | ||
10 | # published by the Free Software Foundation. | ||
11 | # | ||
12 | # This program is distributed in the hope that it will be useful, | ||
13 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | # GNU General Public License for more details. | ||
16 | # | ||
17 | # You should have received a copy of the GNU General Public License along | ||
18 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
19 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
20 | # | ||
21 | ########################################################################## | ||
22 | # | ||
23 | # Thanks to: | ||
24 | # * Holger Freyther <zecke@handhelds.org> | ||
25 | # * Justin Patrin <papercrane@reversefold.com> | ||
26 | # | ||
27 | ########################################################################## | ||
28 | |||
29 | """ | ||
30 | BitBake Shell | ||
31 | |||
32 | IDEAS: | ||
33 | * list defined tasks per package | ||
34 | * list classes | ||
35 | * toggle force | ||
36 | * command to reparse just one (or more) bbfile(s) | ||
37 | * automatic check if reparsing is necessary (inotify?) | ||
38 | * frontend for bb file manipulation | ||
39 | * more shell-like features: | ||
40 | - output control, i.e. pipe output into grep, sort, etc. | ||
41 | - job control, i.e. bring running commands into background and foreground | ||
42 | * start parsing in background right after startup | ||
43 | * ncurses interface | ||
44 | |||
45 | PROBLEMS: | ||
46 | * force doesn't always work | ||
47 | * readline completion for commands with more than one parameters | ||
48 | |||
49 | """ | ||
50 | |||
51 | ########################################################################## | ||
52 | # Import and setup global variables | ||
53 | ########################################################################## | ||
54 | |||
55 | try: | ||
56 | set | ||
57 | except NameError: | ||
58 | from sets import Set as set | ||
59 | import sys, os, readline, socket, httplib, urllib, commands, popen2, copy, shlex, Queue, fnmatch | ||
60 | from bb import data, parse, build, fatal, cache, taskdata, runqueue, providers as Providers | ||
61 | |||
__version__ = "0.5.3.1"
__credits__ = """BitBake Shell Version %s (C) 2005 Michael 'Mickey' Lauer <mickey@Vanille.de>
Type 'help' for more information, press CTRL-D to exit.""" % __version__

# Module-level shell state shared by the command implementations below.
cmds = {}                 # name -> (function, numparams, usage, helptext)
leave_mainloop = False    # set True by the 'exit' command to stop the REPL
last_exception = None     # last event exception (see lastError/pasteLog)
cooker = None             # the bb cooker; presumably assigned at startup — confirm
parsed = False            # whether .bb files have been parsed this session
debug = os.environ.get( "BBSHELL_DEBUG", "" )   # non-empty enables debug output
72 | |||
73 | ########################################################################## | ||
74 | # Class BitBakeShellCommands | ||
75 | ########################################################################## | ||
76 | |||
77 | class BitBakeShellCommands: | ||
78 | """This class contains the valid commands for the shell""" | ||
79 | |||
80 | def __init__( self, shell ): | ||
81 | """Register all the commands""" | ||
82 | self._shell = shell | ||
83 | for attr in BitBakeShellCommands.__dict__: | ||
84 | if not attr.startswith( "_" ): | ||
85 | if attr.endswith( "_" ): | ||
86 | command = attr[:-1].lower() | ||
87 | else: | ||
88 | command = attr[:].lower() | ||
89 | method = getattr( BitBakeShellCommands, attr ) | ||
90 | debugOut( "registering command '%s'" % command ) | ||
91 | # scan number of arguments | ||
92 | usage = getattr( method, "usage", "" ) | ||
93 | if usage != "<...>": | ||
94 | numArgs = len( usage.split() ) | ||
95 | else: | ||
96 | numArgs = -1 | ||
97 | shell.registerCommand( command, method, numArgs, "%s %s" % ( command, usage ), method.__doc__ ) | ||
98 | |||
    def _checkParsed( self ):
        # Lazily trigger a full parse the first time a command needs metadata.
        if not parsed:
            print "SHELL: This command needs to parse bbfiles..."
            self.parse( None )
103 | |||
104 | def _findProvider( self, item ): | ||
105 | self._checkParsed() | ||
106 | # Need to use taskData for this information | ||
107 | preferred = data.getVar( "PREFERRED_PROVIDER_%s" % item, cooker.configuration.data, 1 ) | ||
108 | if not preferred: preferred = item | ||
109 | try: | ||
110 | lv, lf, pv, pf = Providers.findBestProvider(preferred, cooker.configuration.data, cooker.status) | ||
111 | except KeyError: | ||
112 | if item in cooker.status.providers: | ||
113 | pf = cooker.status.providers[item][0] | ||
114 | else: | ||
115 | pf = None | ||
116 | return pf | ||
117 | |||
    def alias( self, params ):
        """Register a new name for a command"""
        new, old = params
        if not old in cmds:
            print "ERROR: Command '%s' not known" % old
        else:
            # Point the new name at the existing registration tuple.
            cmds[new] = cmds[old]
            print "OK"
    alias.usage = "<alias> <command>"
127 | |||
    def buffer( self, params ):
        """Dump specified output buffer"""
        # Buffer indices are listed by the 'buffers' command.
        index = params[0]
        print self._shell.myout.buffer( int( index ) )
    buffer.usage = "<index>"
133 | |||
    def buffers( self, params ):
        """Show the available output buffers"""
        commands = self._shell.myout.bufferedCommands()
        if not commands:
            print "SHELL: No buffered commands available yet. Start doing something."
        else:
            # Print an indexed table; the index is what 'buffer' expects.
            print "="*35, "Available Output Buffers", "="*27
            for index, cmd in enumerate( commands ):
                print "| %s %s" % ( str( index ).ljust( 3 ), cmd )
            print "="*88
144 | |||
    def build( self, params, cmd = "build" ):
        """Build a providee"""
        global last_exception
        globexpr = params[0]
        self._checkParsed()
        # Expand the glob against known package names; if nothing matches,
        # treat the expression itself as a literal providee name.
        names = globfilter( cooker.status.pkg_pn.keys(), globexpr )
        if len( names ) == 0: names = [ globexpr ]
        print "SHELL: Building %s" % ' '.join( names )

        # Build task data against a fresh expanded copy of the config data.
        td = taskdata.TaskData(cooker.configuration.abort)
        localdata = data.createCopy(cooker.configuration.data)
        data.update_data(localdata)
        data.expandKeys(localdata)

        try:
            tasks = []
            for name in names:
                td.add_provider(localdata, cooker.status, name)
                providers = td.get_provider(name)

                if len(providers) == 0:
                    raise Providers.NoProvider

                tasks.append([name, "do_%s" % cmd])

            td.add_unresolved(localdata, cooker.status)

            # Hand the task list to a runqueue and execute it.
            rq = runqueue.RunQueue(cooker, localdata, cooker.status, td, tasks)
            rq.prepare_runqueue()
            rq.execute_runqueue()

        except Providers.NoProvider:
            print "ERROR: No Provider"
            last_exception = Providers.NoProvider

        except runqueue.TaskFailure, fnids:
            for fnid in fnids:
                print "ERROR: '%s' failed" % td.fn_index[fnid]
            last_exception = runqueue.TaskFailure

        except build.EventException, e:
            print "ERROR: Couldn't build '%s'" % names
            last_exception = e


    build.usage = "<providee>"
191 | |||
    def clean( self, params ):
        """Clean a providee"""
        # Delegates to build() with the 'clean' task.
        self.build( params, "clean" )
    clean.usage = "<providee>"
196 | |||
    def compile( self, params ):
        """Execute 'compile' on a providee"""
        # Delegates to build() with the 'compile' task.
        self.build( params, "compile" )
    compile.usage = "<providee>"
201 | |||
    def configure( self, params ):
        """Execute 'configure' on a providee"""
        # Delegates to build() with the 'configure' task.
        self.build( params, "configure" )
    configure.usage = "<providee>"
206 | |||
    def install( self, params ):
        """Execute 'install' on a providee"""
        # Delegates to build() with the 'install' task.
        self.build( params, "install" )
    install.usage = "<providee>"
211 | |||
    def edit( self, params ):
        """Call $EDITOR on a providee"""
        name = params[0]
        bbfile = self._findProvider( name )
        if bbfile is not None:
            # Fall back to vi when $EDITOR is unset.
            os.system( "%s %s" % ( os.environ.get( "EDITOR", "vi" ), bbfile ) )
        else:
            print "ERROR: Nothing provides '%s'" % name
    edit.usage = "<providee>"
221 | |||
    def environment( self, params ):
        """Dump out the outer BitBake environment"""
        cooker.showEnvironment()
225 | |||
    def exit_( self, params ):
        """Leave the BitBake Shell"""
        debugOut( "setting leave_mainloop to true" )
        # Registration strips the trailing underscore, exposing this as
        # the 'exit' command without shadowing Python's exit().
        global leave_mainloop
        leave_mainloop = True
231 | |||
    def fetch( self, params ):
        """Fetch a providee"""
        # Delegates to build() with the 'fetch' task.
        self.build( params, "fetch" )
    fetch.usage = "<providee>"
236 | |||
    def fileBuild( self, params, cmd = "build" ):
        """Parse and build a .bb file"""
        global last_exception
        name = params[0]
        # Resolve a possibly-partial filename against known bbfiles.
        bf = completeFilePath( name )
        print "SHELL: Calling '%s' on '%s'" % ( cmd, bf )

        try:
            cooker.buildFile(bf, cmd)
        except parse.ParseError:
            print "ERROR: Unable to open or parse '%s'" % bf
        except build.EventException, e:
            print "ERROR: Couldn't build '%s'" % name
            last_exception = e

    fileBuild.usage = "<bbfile>"
253 | |||
    def fileClean( self, params ):
        """Clean a .bb file"""
        # Delegates to fileBuild() with the 'clean' task.
        self.fileBuild( params, "clean" )
    fileClean.usage = "<bbfile>"
258 | |||
259 | def fileEdit( self, params ): | ||
260 | """Call $EDITOR on a .bb file""" | ||
261 | name = params[0] | ||
262 | os.system( "%s %s" % ( os.environ.get( "EDITOR", "vi" ), completeFilePath( name ) ) ) | ||
263 | fileEdit.usage = "<bbfile>" | ||
264 | |||
    def fileRebuild( self, params ):
        """Rebuild (clean & build) a .bb file"""
        # Delegates to fileBuild() with the 'rebuild' task.
        self.fileBuild( params, "rebuild" )
    fileRebuild.usage = "<bbfile>"
269 | |||
    def fileReparse( self, params ):
        """(re)Parse a bb file"""
        bbfile = params[0]
        print "SHELL: Parsing '%s'" % bbfile
        parse.update_mtime( bbfile )
        # Invalidate the cache entry, reload, then flush the cache to disk.
        cooker.bb_cache.cacheValidUpdate(bbfile)
        fromCache = cooker.bb_cache.loadData(bbfile, cooker.configuration.data, cooker.status)
        cooker.bb_cache.sync()
        # The from-cache shortcut is deliberately disabled ('if False').
        if False: #fromCache:
            print "SHELL: File has not been updated, not reparsing"
        else:
            print "SHELL: Parsed"
    fileReparse.usage = "<bbfile>"
283 | |||
    def abort( self, params ):
        """Toggle abort task execution flag (see bitbake -k)"""
        cooker.configuration.abort = not cooker.configuration.abort
        print "SHELL: Abort Flag is now '%s'" % repr( cooker.configuration.abort )
288 | |||
    def force( self, params ):
        """Toggle force task execution flag (see bitbake -f)"""
        cooker.configuration.force = not cooker.configuration.force
        print "SHELL: Force Flag is now '%s'" % repr( cooker.configuration.force )
293 | |||
    def help( self, params ):
        """Show a comprehensive list of commands and their purpose"""
        print "="*30, "Available Commands", "="*30
        allcmds = cmds.keys()
        allcmds.sort()
        for cmd in allcmds:
            # Registration tuple: (function, numparams, usage, helptext).
            function,numparams,usage,helptext = cmds[cmd]
            print "| %s | %s" % (usage.ljust(30), helptext)
        print "="*78
303 | |||
    def lastError( self, params ):
        """Show the reason or log that was produced by the last BitBake event exception"""
        if last_exception is None:
            print "SHELL: No Errors yet (Phew)..."
        else:
            reason, event = last_exception.args
            print "SHELL: Reason for the last error: '%s'" % reason
            # Reasons of the form "<msg>: <logfile>" carry a log path.
            if ':' in reason:
                msg, filename = reason.split( ':' )
                filename = filename.strip()
                print "SHELL: Dumping log file for last error:"
                try:
                    print open( filename ).read()
                except IOError:
                    print "ERROR: Couldn't open '%s'" % filename
319 | |||
    def match( self, params ):
        """Dump all files or providers matching a glob expression"""
        what, globexpr = params
        if what == "files":
            self._checkParsed()
            for key in globfilter( cooker.status.pkg_fn.keys(), globexpr ): print key
        elif what == "providers":
            self._checkParsed()
            for key in globfilter( cooker.status.pkg_pn.keys(), globexpr ): print key
        else:
            print "Usage: match %s" % self.print_.usage
    match.usage = "<files|providers> <glob>"
332 | |||
    def new( self, params ):
        """Create a new .bb file and open the editor"""
        dirname, filename = params
        # Derive the packages root by stripping the last two components
        # off the BBFILES path.
        packages = '/'.join( data.getVar( "BBFILES", cooker.configuration.data, 1 ).split('/')[:-2] )
        fulldirname = "%s/%s" % ( packages, dirname )

        if not os.path.exists( fulldirname ):
            print "SHELL: Creating '%s'" % fulldirname
            os.mkdir( fulldirname )
        if os.path.exists( fulldirname ) and os.path.isdir( fulldirname ):
            if os.path.exists( "%s/%s" % ( fulldirname, filename ) ):
                print "SHELL: ERROR: %s/%s already exists" % ( fulldirname, filename )
                return False
            print "SHELL: Creating '%s/%s'" % ( fulldirname, filename )
            newpackage = open( "%s/%s" % ( fulldirname, filename ), "w" )
            # Write a skeleton recipe template into the new file.
            print >>newpackage,"""DESCRIPTION = ""
SECTION = ""
AUTHOR = ""
HOMEPAGE = ""
MAINTAINER = ""
LICENSE = "GPL"
PR = "r0"

SRC_URI = ""

#inherit base

#do_configure() {
#
#}

#do_compile() {
#
#}

#do_stage() {
#
#}

#do_install() {
#
#}
"""
            newpackage.close()
            os.system( "%s %s/%s" % ( os.environ.get( "EDITOR" ), fulldirname, filename ) )
    new.usage = "<directory> <filename>"
379 | |||
    def package( self, params ):
        """Execute 'package' on a providee"""
        # Delegates to build() with the 'package' task.
        self.build( params, "package" )
    package.usage = "<providee>"
384 | |||
    def pasteBin( self, params ):
        """Send a command + output buffer to the pastebin at http://rafb.net/paste"""
        index = params[0]
        contents = self._shell.myout.buffer( int( index ) )
        sendToPastebin( "output of " + params[0], contents )
    pasteBin.usage = "<index>"
391 | |||
    def pasteLog( self, params ):
        """Send the last event exception error log (if there is one) to http://rafb.net/paste"""
        if last_exception is None:
            print "SHELL: No Errors yet (Phew)..."
        else:
            reason, event = last_exception.args
            print "SHELL: Reason for the last error: '%s'" % reason
            # Reasons of the form "<msg>: <logfile>" carry a log path.
            if ':' in reason:
                msg, filename = reason.split( ':' )
                filename = filename.strip()
                print "SHELL: Pasting log file to pastebin..."

                file = open( filename ).read()
                sendToPastebin( "contents of " + filename, file )
406 | |||
    def patch( self, params ):
        """Execute 'patch' command on a providee"""
        # Delegates to build() with the 'patch' task.
        self.build( params, "patch" )
    patch.usage = "<providee>"
411 | |||
    def parse( self, params ):
        """(Re-)parse .bb files and calculate the dependency graph"""
        # Start from a fresh cache, honouring ASSUME_PROVIDED.
        cooker.status = cache.CacheData()
        ignore = data.getVar("ASSUME_PROVIDED", cooker.configuration.data, 1) or ""
        cooker.status.ignored_dependencies = set( ignore.split() )
        cooker.handleCollections( data.getVar("BBFILE_COLLECTIONS", cooker.configuration.data, 1) )

        (filelist, masked) = cooker.collect_bbfiles()
        cooker.parse_bbfiles(filelist, masked, cooker.myProgressCallback)
        cooker.buildDepgraph()
        # Record that metadata is now available (see _checkParsed).
        global parsed
        parsed = True
424 | |||
425 | |||
    def reparse( self, params ):
        """(re)Parse a providee's bb file"""
        bbfile = self._findProvider( params[0] )
        if bbfile is not None:
            print "SHELL: Found bbfile '%s' for '%s'" % ( bbfile, params[0] )
            self.fileReparse( [ bbfile ] )
        else:
            print "ERROR: Nothing provides '%s'" % params[0]
    reparse.usage = "<providee>"
435 | |||
    def getvar( self, params ):
        """Dump the contents of an outer BitBake environment variable"""
        var = params[0]
        value = data.getVar( var, cooker.configuration.data, 1 )
        print value
    getvar.usage = "<variable>"
442 | |||
    def peek( self, params ):
        """Dump contents of variable defined in providee's metadata"""
        name, var = params
        bbfile = self._findProvider( name )
        if bbfile is not None:
            # Load the full (expanded) metadata for the providing file.
            the_data = cooker.bb_cache.loadDataFull(bbfile, cooker.configuration.data)
            value = the_data.getVar( var, 1 )
            print value
        else:
            print "ERROR: Nothing provides '%s'" % name
    peek.usage = "<providee> <variable>"
454 | |||
    def poke( self, params ):
        """Set contents of variable defined in providee's metadata"""
        name, var, value = params
        bbfile = self._findProvider( name )
        if bbfile is not None:
            # Intentionally disabled: per-file metadata writes are broken.
            print "ERROR: Sorry, this functionality is currently broken"
            #d = cooker.pkgdata[bbfile]
            #data.setVar( var, value, d )

            # mark the change semi persistant
            #cooker.pkgdata.setDirty(bbfile, d)
            #print "OK"
        else:
            print "ERROR: Nothing provides '%s'" % name
    poke.usage = "<providee> <variable> <value>"
470 | |||
471 | def print_( self, params ): | ||
472 | """Dump all files or providers""" | ||
473 | what = params[0] | ||
474 | if what == "files": | ||
475 | self._checkParsed() | ||
476 | for key in cooker.status.pkg_fn.keys(): print key | ||
477 | elif what == "providers": | ||
478 | self._checkParsed() | ||
479 | for key in cooker.status.providers.keys(): print key | ||
480 | else: | ||
481 | print "Usage: print %s" % self.print_.usage | ||
482 | print_.usage = "<files|providers>" | ||
483 | |||
484 | def python( self, params ): | ||
485 | """Enter the expert mode - an interactive BitBake Python Interpreter""" | ||
486 | sys.ps1 = "EXPERT BB>>> " | ||
487 | sys.ps2 = "EXPERT BB... " | ||
488 | import code | ||
489 | interpreter = code.InteractiveConsole( dict( globals() ) ) | ||
490 | interpreter.interact( "SHELL: Expert Mode - BitBake Python %s\nType 'help' for more information, press CTRL-D to switch back to BBSHELL." % sys.version ) | ||
491 | |||
492 | def showdata( self, params ): | ||
493 | """Execute 'showdata' on a providee""" | ||
494 | cooker.showEnvironment(None, params) | ||
495 | showdata.usage = "<providee>" | ||
496 | |||
497 | def setVar( self, params ): | ||
498 | """Set an outer BitBake environment variable""" | ||
499 | var, value = params | ||
500 | data.setVar( var, value, cooker.configuration.data ) | ||
501 | print "OK" | ||
502 | setVar.usage = "<variable> <value>" | ||
503 | |||
504 | def rebuild( self, params ): | ||
505 | """Clean and rebuild a .bb file or a providee""" | ||
506 | self.build( params, "clean" ) | ||
507 | self.build( params, "build" ) | ||
508 | rebuild.usage = "<providee>" | ||
509 | |||
510 | def shell( self, params ): | ||
511 | """Execute a shell command and dump the output""" | ||
512 | if params != "": | ||
513 | print commands.getoutput( " ".join( params ) ) | ||
514 | shell.usage = "<...>" | ||
515 | |||
516 | def stage( self, params ): | ||
517 | """Execute 'stage' on a providee""" | ||
518 | self.build( params, "populate_staging" ) | ||
519 | stage.usage = "<providee>" | ||
520 | |||
    def status( self, params ):
        """<just for testing>"""
        # Debug helper: dump assorted pieces of cooker / shell state.
        print "-" * 78
        print "building list = '%s'" % cooker.building_list
        print "build path = '%s'" % cooker.build_path
        print "consider_msgs_cache = '%s'" % cooker.consider_msgs_cache
        print "build stats = '%s'" % cooker.stats
        # last_exception is a module-level global set when a command fails.
        if last_exception is not None: print "last_exception = '%s'" % repr( last_exception.args )
        print "memory output contents = '%s'" % self._shell.myout._buffer
530 | |||
531 | def test( self, params ): | ||
532 | """<just for testing>""" | ||
533 | print "testCommand called with '%s'" % params | ||
534 | |||
535 | def unpack( self, params ): | ||
536 | """Execute 'unpack' on a providee""" | ||
537 | self.build( params, "unpack" ) | ||
538 | unpack.usage = "<providee>" | ||
539 | |||
540 | def which( self, params ): | ||
541 | """Computes the providers for a given providee""" | ||
542 | # Need to use taskData for this information | ||
543 | item = params[0] | ||
544 | |||
545 | self._checkParsed() | ||
546 | |||
547 | preferred = data.getVar( "PREFERRED_PROVIDER_%s" % item, cooker.configuration.data, 1 ) | ||
548 | if not preferred: preferred = item | ||
549 | |||
550 | try: | ||
551 | lv, lf, pv, pf = Providers.findBestProvider(preferred, cooker.configuration.data, cooker.status) | ||
552 | except KeyError: | ||
553 | lv, lf, pv, pf = (None,)*4 | ||
554 | |||
555 | try: | ||
556 | providers = cooker.status.providers[item] | ||
557 | except KeyError: | ||
558 | print "SHELL: ERROR: Nothing provides", preferred | ||
559 | else: | ||
560 | for provider in providers: | ||
561 | if provider == pf: provider = " (***) %s" % provider | ||
562 | else: provider = " %s" % provider | ||
563 | print provider | ||
564 | which.usage = "<providee>" | ||
565 | |||
566 | ########################################################################## | ||
567 | # Common helper functions | ||
568 | ########################################################################## | ||
569 | |||
def completeFilePath( bbfile ):
    """Get the complete bbfile path"""
    # Without parse results there is nothing to match against.
    if not cooker.status or not cooker.status.pkg_fn:
        return bbfile
    # Return the first known recipe path ending with the given name.
    for path in cooker.status.pkg_fn.keys():
        if path.endswith( bbfile ):
            return path
    return bbfile
578 | |||
579 | def sendToPastebin( desc, content ): | ||
580 | """Send content to http://oe.pastebin.com""" | ||
581 | mydata = {} | ||
582 | mydata["lang"] = "Plain Text" | ||
583 | mydata["desc"] = desc | ||
584 | mydata["cvt_tabs"] = "No" | ||
585 | mydata["nick"] = "%s@%s" % ( os.environ.get( "USER", "unknown" ), socket.gethostname() or "unknown" ) | ||
586 | mydata["text"] = content | ||
587 | params = urllib.urlencode( mydata ) | ||
588 | headers = {"Content-type": "application/x-www-form-urlencoded","Accept": "text/plain"} | ||
589 | |||
590 | host = "rafb.net" | ||
591 | conn = httplib.HTTPConnection( "%s:80" % host ) | ||
592 | conn.request("POST", "/paste/paste.php", params, headers ) | ||
593 | |||
594 | response = conn.getresponse() | ||
595 | conn.close() | ||
596 | |||
597 | if response.status == 302: | ||
598 | location = response.getheader( "location" ) or "unknown" | ||
599 | print "SHELL: Pasted to http://%s%s" % ( host, location ) | ||
600 | else: | ||
601 | print "ERROR: %s %s" % ( response.status, response.reason ) | ||
602 | |||
def completer( text, state ):
    """Return a possible readline completion"""
    debugOut( "completer called with text='%s', state='%d'" % ( text, state ) )

    # readline calls this repeatedly with increasing 'state'; compute the
    # candidate list only on the first call and cache it on the function.
    if state == 0:
        line = readline.get_line_buffer()
        if " " in line:
            line = line.split()
            # we are in second (or more) argument
            if line[0] in cmds and hasattr( cmds[line[0]][0], "usage" ): # known command and usage
                # The first word of the command's usage string decides which
                # namespace to complete from.
                u = getattr( cmds[line[0]][0], "usage" ).split()[0]
                if u == "<variable>":
                    allmatches = cooker.configuration.data.keys()
                elif u == "<bbfile>":
                    if cooker.status.pkg_fn is None: allmatches = [ "(No Matches Available. Parsed yet?)" ]
                    else: allmatches = [ x.split("/")[-1] for x in cooker.status.pkg_fn.keys() ]
                elif u == "<providee>":
                    if cooker.status.pkg_fn is None: allmatches = [ "(No Matches Available. Parsed yet?)" ]
                    else: allmatches = cooker.status.providers.iterkeys()
                else: allmatches = [ "(No tab completion available for this command)" ]
            else: allmatches = [ "(No tab completion available for this command)" ]
        else:
            # we are in first argument
            allmatches = cmds.iterkeys()

        # Keep only the candidates that start with the typed prefix.
        completer.matches = [ x for x in allmatches if x[:len(text)] == text ]
        #print "completer.matches = '%s'" % completer.matches
    if len( completer.matches ) > state:
        return completer.matches[state]
    else:
        return None
634 | |||
def debugOut( text ):
    """Write a debug line to stderr when the module-level debug flag is set."""
    if not debug:
        return
    sys.stderr.write( "( %s )\n" % text )
638 | |||
def columnize( alist, width = 80 ):
    """
    A word-wrap function that preserves existing line breaks
    and most spaces in the text. Expects that existing line
    breaks are posix newlines (\n).

    Joins the words of alist with single spaces, inserting a newline
    instead whenever the current physical line plus the next word's
    first line would reach 'width'. Returns "" for an empty list.
    """
    # Rewritten from a reduce()-based one-liner: 'reduce' is a Python-2-only
    # builtin, the ' \n'[bool] indexing trick was obscure, and repeated
    # string concatenation was quadratic.
    pieces = []
    tail = ""   # text of the current physical line (after the last newline)
    for index, word in enumerate(alist):
        if index == 0:
            sep = ""
        elif len(tail) + len(word.split('\n', 1)[0]) >= width:
            sep = "\n"
        else:
            sep = " "
        pieces.append(sep + word)
        # The new current line is everything after the last newline emitted.
        tail = (tail + sep + word).rsplit('\n', 1)[-1]
    return "".join(pieces)
653 | |||
def globfilter( names, pattern ):
    """Return the subset of names matching the shell-style glob pattern."""
    return [ name for name in names if fnmatch.fnmatch( name, pattern ) ]
656 | |||
657 | ########################################################################## | ||
658 | # Class MemoryOutput | ||
659 | ########################################################################## | ||
660 | |||
class MemoryOutput:
    """File-like output class buffering the output of the last 10 commands"""

    def __init__( self, delegate ):
        self.delegate = delegate      # real stream to mirror writes to (may be None)
        self._buffer = []             # list of ( command, [ output chunks ] ) pairs
        self.text = []                # chunks captured for the command in progress
        self._command = None          # command currently being captured, if any

    def startCommand( self, command ):
        """Begin capturing output for a new command."""
        self._command = command
        self.text = []

    def endCommand( self ):
        """File the captured output, keeping only the 10 most recent entries."""
        if self._command is None:
            return
        if len( self._buffer ) == 10:
            del self._buffer[0]
        self._buffer.append( ( self._command, self.text ) )

    def removeLast( self ):
        """Drop the most recently filed entry and reset the capture state."""
        if self._buffer:
            del self._buffer[-1]
        self.text = []
        self._command = None

    def lastBuffer( self ):
        """Return the output chunks of the newest entry (None when empty)."""
        if self._buffer:
            return self._buffer[-1][1]

    def bufferedCommands( self ):
        """Return the buffered command lines, oldest first."""
        return [ cmd for cmd, _ in self._buffer ]

    def buffer( self, i ):
        """Render buffered entry i as prompt + command + captured output."""
        if i >= len( self._buffer ):
            return "ERROR: Invalid buffer number. Buffer needs to be in (0, %d)" % ( len( self._buffer ) - 1 )
        command, chunks = self._buffer[i]
        return "BB>> %s\n%s" % ( command, "".join( chunks ) )

    def write( self, text ):
        """Capture text for the current command (prompt excluded) and forward it."""
        if self._command is not None and text != "BB>> ":
            self.text.append( text )
        if self.delegate is not None:
            self.delegate.write( text )

    def flush( self ):
        return self.delegate.flush()

    def fileno( self ):
        return self.delegate.fileno()

    def isatty( self ):
        return self.delegate.isatty()
699 | |||
700 | ########################################################################## | ||
701 | # Class BitBakeShell | ||
702 | ########################################################################## | ||
703 | |||
704 | class BitBakeShell: | ||
705 | |||
706 | def __init__( self ): | ||
707 | """Register commands and set up readline""" | ||
708 | self.commandQ = Queue.Queue() | ||
709 | self.commands = BitBakeShellCommands( self ) | ||
710 | self.myout = MemoryOutput( sys.stdout ) | ||
711 | self.historyfilename = os.path.expanduser( "~/.bbsh_history" ) | ||
712 | self.startupfilename = os.path.expanduser( "~/.bbsh_startup" ) | ||
713 | |||
714 | readline.set_completer( completer ) | ||
715 | readline.set_completer_delims( " " ) | ||
716 | readline.parse_and_bind("tab: complete") | ||
717 | |||
718 | try: | ||
719 | readline.read_history_file( self.historyfilename ) | ||
720 | except IOError: | ||
721 | pass # It doesn't exist yet. | ||
722 | |||
723 | print __credits__ | ||
724 | |||
725 | def cleanup( self ): | ||
726 | """Write readline history and clean up resources""" | ||
727 | debugOut( "writing command history" ) | ||
728 | try: | ||
729 | readline.write_history_file( self.historyfilename ) | ||
730 | except: | ||
731 | print "SHELL: Unable to save command history" | ||
732 | |||
733 | def registerCommand( self, command, function, numparams = 0, usage = "", helptext = "" ): | ||
734 | """Register a command""" | ||
735 | if usage == "": usage = command | ||
736 | if helptext == "": helptext = function.__doc__ or "<not yet documented>" | ||
737 | cmds[command] = ( function, numparams, usage, helptext ) | ||
738 | |||
739 | def processCommand( self, command, params ): | ||
740 | """Process a command. Check number of params and print a usage string, if appropriate""" | ||
741 | debugOut( "processing command '%s'..." % command ) | ||
742 | try: | ||
743 | function, numparams, usage, helptext = cmds[command] | ||
744 | except KeyError: | ||
745 | print "SHELL: ERROR: '%s' command is not a valid command." % command | ||
746 | self.myout.removeLast() | ||
747 | else: | ||
748 | if (numparams != -1) and (not len( params ) == numparams): | ||
749 | print "Usage: '%s'" % usage | ||
750 | return | ||
751 | |||
752 | result = function( self.commands, params ) | ||
753 | debugOut( "result was '%s'" % result ) | ||
754 | |||
755 | def processStartupFile( self ): | ||
756 | """Read and execute all commands found in $HOME/.bbsh_startup""" | ||
757 | if os.path.exists( self.startupfilename ): | ||
758 | startupfile = open( self.startupfilename, "r" ) | ||
759 | for cmdline in startupfile: | ||
760 | debugOut( "processing startup line '%s'" % cmdline ) | ||
761 | if not cmdline: | ||
762 | continue | ||
763 | if "|" in cmdline: | ||
764 | print "ERROR: '|' in startup file is not allowed. Ignoring line" | ||
765 | continue | ||
766 | self.commandQ.put( cmdline.strip() ) | ||
767 | |||
768 | def main( self ): | ||
769 | """The main command loop""" | ||
770 | while not leave_mainloop: | ||
771 | try: | ||
772 | if self.commandQ.empty(): | ||
773 | sys.stdout = self.myout.delegate | ||
774 | cmdline = raw_input( "BB>> " ) | ||
775 | sys.stdout = self.myout | ||
776 | else: | ||
777 | cmdline = self.commandQ.get() | ||
778 | if cmdline: | ||
779 | allCommands = cmdline.split( ';' ) | ||
780 | for command in allCommands: | ||
781 | pipecmd = None | ||
782 | # | ||
783 | # special case for expert mode | ||
784 | if command == 'python': | ||
785 | sys.stdout = self.myout.delegate | ||
786 | self.processCommand( command, "" ) | ||
787 | sys.stdout = self.myout | ||
788 | else: | ||
789 | self.myout.startCommand( command ) | ||
790 | if '|' in command: # disable output | ||
791 | command, pipecmd = command.split( '|' ) | ||
792 | delegate = self.myout.delegate | ||
793 | self.myout.delegate = None | ||
794 | tokens = shlex.split( command, True ) | ||
795 | self.processCommand( tokens[0], tokens[1:] or "" ) | ||
796 | self.myout.endCommand() | ||
797 | if pipecmd is not None: # restore output | ||
798 | self.myout.delegate = delegate | ||
799 | |||
800 | pipe = popen2.Popen4( pipecmd ) | ||
801 | pipe.tochild.write( "\n".join( self.myout.lastBuffer() ) ) | ||
802 | pipe.tochild.close() | ||
803 | sys.stdout.write( pipe.fromchild.read() ) | ||
804 | # | ||
805 | except EOFError: | ||
806 | |||
807 | return | ||
808 | except KeyboardInterrupt: | ||
809 | |||
810 | |||
811 | ########################################################################## | ||
812 | # Start function - called from the BitBake command line utility | ||
813 | ########################################################################## | ||
814 | |||
def start( aCooker ):
    """Entry point used by the BitBake command line utility: bind the
    module-level cooker, run any startup-file commands, then enter the
    interactive loop and save history on exit."""
    global cooker
    cooker = aCooker
    bbshell = BitBakeShell()
    bbshell.processStartupFile()
    bbshell.main()
    bbshell.cleanup()
822 | |||
# This module is only meaningful inside a running BitBake; refuse
# standalone execution.
if __name__ == "__main__":
    print "SHELL: Sorry, this program should only be called by BitBake."
diff --git a/bitbake-dev/lib/bb/taskdata.py b/bitbake-dev/lib/bb/taskdata.py deleted file mode 100644 index 4a88e75f6d..0000000000 --- a/bitbake-dev/lib/bb/taskdata.py +++ /dev/null | |||
@@ -1,610 +0,0 @@ | |||
1 | #!/usr/bin/env python | ||
2 | # ex:ts=4:sw=4:sts=4:et | ||
3 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
4 | """ | ||
5 | BitBake 'TaskData' implementation | ||
6 | |||
7 | Task data collection and handling | ||
8 | |||
9 | """ | ||
10 | |||
11 | # Copyright (C) 2006 Richard Purdie | ||
12 | # | ||
13 | # This program is free software; you can redistribute it and/or modify | ||
14 | # it under the terms of the GNU General Public License version 2 as | ||
15 | # published by the Free Software Foundation. | ||
16 | # | ||
17 | # This program is distributed in the hope that it will be useful, | ||
18 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
19 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
20 | # GNU General Public License for more details. | ||
21 | # | ||
22 | # You should have received a copy of the GNU General Public License along | ||
23 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
24 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
25 | |||
26 | import bb | ||
27 | |||
def re_match_strings(target, strings):
    """
    Whether or not the string 'target' matches
    any one string of the strings which can be regular expression string
    """
    import re

    # A candidate matches either literally or as an (unanchored) regex.
    # Rewritten to use any() and 'is not None' instead of the manual
    # loop with a '!= None' comparison.
    return any(name == target or re.search(name, target) is not None
               for name in strings)
40 | |||
41 | class TaskData: | ||
42 | """ | ||
43 | BitBake Task Data implementation | ||
44 | """ | ||
45 | def __init__(self, abort = True, tryaltconfigs = False): | ||
46 | self.build_names_index = [] | ||
47 | self.run_names_index = [] | ||
48 | self.fn_index = [] | ||
49 | |||
50 | self.build_targets = {} | ||
51 | self.run_targets = {} | ||
52 | |||
53 | self.external_targets = [] | ||
54 | |||
55 | self.tasks_fnid = [] | ||
56 | self.tasks_name = [] | ||
57 | self.tasks_tdepends = [] | ||
58 | self.tasks_idepends = [] | ||
59 | # Cache to speed up task ID lookups | ||
60 | self.tasks_lookup = {} | ||
61 | |||
62 | self.depids = {} | ||
63 | self.rdepids = {} | ||
64 | |||
65 | self.consider_msgs_cache = [] | ||
66 | |||
67 | self.failed_deps = [] | ||
68 | self.failed_rdeps = [] | ||
69 | self.failed_fnids = [] | ||
70 | |||
71 | self.abort = abort | ||
72 | self.tryaltconfigs = tryaltconfigs | ||
73 | |||
74 | def getbuild_id(self, name): | ||
75 | """ | ||
76 | Return an ID number for the build target name. | ||
77 | If it doesn't exist, create one. | ||
78 | """ | ||
79 | if not name in self.build_names_index: | ||
80 | self.build_names_index.append(name) | ||
81 | return len(self.build_names_index) - 1 | ||
82 | |||
83 | return self.build_names_index.index(name) | ||
84 | |||
85 | def getrun_id(self, name): | ||
86 | """ | ||
87 | Return an ID number for the run target name. | ||
88 | If it doesn't exist, create one. | ||
89 | """ | ||
90 | if not name in self.run_names_index: | ||
91 | self.run_names_index.append(name) | ||
92 | return len(self.run_names_index) - 1 | ||
93 | |||
94 | return self.run_names_index.index(name) | ||
95 | |||
96 | def getfn_id(self, name): | ||
97 | """ | ||
98 | Return an ID number for the filename. | ||
99 | If it doesn't exist, create one. | ||
100 | """ | ||
101 | if not name in self.fn_index: | ||
102 | self.fn_index.append(name) | ||
103 | return len(self.fn_index) - 1 | ||
104 | |||
105 | return self.fn_index.index(name) | ||
106 | |||
107 | def gettask_ids(self, fnid): | ||
108 | """ | ||
109 | Return an array of the ID numbers matching a given fnid. | ||
110 | """ | ||
111 | ids = [] | ||
112 | if fnid in self.tasks_lookup: | ||
113 | for task in self.tasks_lookup[fnid]: | ||
114 | ids.append(self.tasks_lookup[fnid][task]) | ||
115 | return ids | ||
116 | |||
117 | def gettask_id(self, fn, task, create = True): | ||
118 | """ | ||
119 | Return an ID number for the task matching fn and task. | ||
120 | If it doesn't exist, create one by default. | ||
121 | Optionally return None instead. | ||
122 | """ | ||
123 | fnid = self.getfn_id(fn) | ||
124 | |||
125 | if fnid in self.tasks_lookup: | ||
126 | if task in self.tasks_lookup[fnid]: | ||
127 | return self.tasks_lookup[fnid][task] | ||
128 | |||
129 | if not create: | ||
130 | return None | ||
131 | |||
132 | self.tasks_name.append(task) | ||
133 | self.tasks_fnid.append(fnid) | ||
134 | self.tasks_tdepends.append([]) | ||
135 | self.tasks_idepends.append([]) | ||
136 | |||
137 | listid = len(self.tasks_name) - 1 | ||
138 | |||
139 | if fnid not in self.tasks_lookup: | ||
140 | self.tasks_lookup[fnid] = {} | ||
141 | self.tasks_lookup[fnid][task] = listid | ||
142 | |||
143 | return listid | ||
144 | |||
145 | def add_tasks(self, fn, dataCache): | ||
146 | """ | ||
147 | Add tasks for a given fn to the database | ||
148 | """ | ||
149 | |||
150 | task_deps = dataCache.task_deps[fn] | ||
151 | |||
152 | fnid = self.getfn_id(fn) | ||
153 | |||
154 | if fnid in self.failed_fnids: | ||
155 | bb.msg.fatal(bb.msg.domain.TaskData, "Trying to re-add a failed file? Something is broken...") | ||
156 | |||
157 | # Check if we've already seen this fn | ||
158 | if fnid in self.tasks_fnid: | ||
159 | return | ||
160 | |||
161 | for task in task_deps['tasks']: | ||
162 | |||
163 | # Work out task dependencies | ||
164 | parentids = [] | ||
165 | for dep in task_deps['parents'][task]: | ||
166 | parentid = self.gettask_id(fn, dep) | ||
167 | parentids.append(parentid) | ||
168 | taskid = self.gettask_id(fn, task) | ||
169 | self.tasks_tdepends[taskid].extend(parentids) | ||
170 | |||
171 | # Touch all intertask dependencies | ||
172 | if 'depends' in task_deps and task in task_deps['depends']: | ||
173 | ids = [] | ||
174 | for dep in task_deps['depends'][task].split(): | ||
175 | if dep: | ||
176 | if ":" not in dep: | ||
177 | bb.msg.fatal(bb.msg.domain.TaskData, "Error, dependency %s does not contain ':' character\n. Task 'depends' should be specified in the form 'packagename:task'" % (depend, fn)) | ||
178 | ids.append(((self.getbuild_id(dep.split(":")[0])), dep.split(":")[1])) | ||
179 | self.tasks_idepends[taskid].extend(ids) | ||
180 | |||
181 | # Work out build dependencies | ||
182 | if not fnid in self.depids: | ||
183 | dependids = {} | ||
184 | for depend in dataCache.deps[fn]: | ||
185 | bb.msg.debug(2, bb.msg.domain.TaskData, "Added dependency %s for %s" % (depend, fn)) | ||
186 | dependids[self.getbuild_id(depend)] = None | ||
187 | self.depids[fnid] = dependids.keys() | ||
188 | |||
189 | # Work out runtime dependencies | ||
190 | if not fnid in self.rdepids: | ||
191 | rdependids = {} | ||
192 | rdepends = dataCache.rundeps[fn] | ||
193 | rrecs = dataCache.runrecs[fn] | ||
194 | for package in rdepends: | ||
195 | for rdepend in bb.utils.explode_deps(rdepends[package]): | ||
196 | bb.msg.debug(2, bb.msg.domain.TaskData, "Added runtime dependency %s for %s" % (rdepend, fn)) | ||
197 | rdependids[self.getrun_id(rdepend)] = None | ||
198 | for package in rrecs: | ||
199 | for rdepend in bb.utils.explode_deps(rrecs[package]): | ||
200 | bb.msg.debug(2, bb.msg.domain.TaskData, "Added runtime recommendation %s for %s" % (rdepend, fn)) | ||
201 | rdependids[self.getrun_id(rdepend)] = None | ||
202 | self.rdepids[fnid] = rdependids.keys() | ||
203 | |||
204 | for dep in self.depids[fnid]: | ||
205 | if dep in self.failed_deps: | ||
206 | self.fail_fnid(fnid) | ||
207 | return | ||
208 | for dep in self.rdepids[fnid]: | ||
209 | if dep in self.failed_rdeps: | ||
210 | self.fail_fnid(fnid) | ||
211 | return | ||
212 | |||
213 | def have_build_target(self, target): | ||
214 | """ | ||
215 | Have we a build target matching this name? | ||
216 | """ | ||
217 | targetid = self.getbuild_id(target) | ||
218 | |||
219 | if targetid in self.build_targets: | ||
220 | return True | ||
221 | return False | ||
222 | |||
223 | def have_runtime_target(self, target): | ||
224 | """ | ||
225 | Have we a runtime target matching this name? | ||
226 | """ | ||
227 | targetid = self.getrun_id(target) | ||
228 | |||
229 | if targetid in self.run_targets: | ||
230 | return True | ||
231 | return False | ||
232 | |||
233 | def add_build_target(self, fn, item): | ||
234 | """ | ||
235 | Add a build target. | ||
236 | If already present, append the provider fn to the list | ||
237 | """ | ||
238 | targetid = self.getbuild_id(item) | ||
239 | fnid = self.getfn_id(fn) | ||
240 | |||
241 | if targetid in self.build_targets: | ||
242 | if fnid in self.build_targets[targetid]: | ||
243 | return | ||
244 | self.build_targets[targetid].append(fnid) | ||
245 | return | ||
246 | self.build_targets[targetid] = [fnid] | ||
247 | |||
248 | def add_runtime_target(self, fn, item): | ||
249 | """ | ||
250 | Add a runtime target. | ||
251 | If already present, append the provider fn to the list | ||
252 | """ | ||
253 | targetid = self.getrun_id(item) | ||
254 | fnid = self.getfn_id(fn) | ||
255 | |||
256 | if targetid in self.run_targets: | ||
257 | if fnid in self.run_targets[targetid]: | ||
258 | return | ||
259 | self.run_targets[targetid].append(fnid) | ||
260 | return | ||
261 | self.run_targets[targetid] = [fnid] | ||
262 | |||
263 | def mark_external_target(self, item): | ||
264 | """ | ||
265 | Mark a build target as being externally requested | ||
266 | """ | ||
267 | targetid = self.getbuild_id(item) | ||
268 | |||
269 | if targetid not in self.external_targets: | ||
270 | self.external_targets.append(targetid) | ||
271 | |||
272 | def get_unresolved_build_targets(self, dataCache): | ||
273 | """ | ||
274 | Return a list of build targets who's providers | ||
275 | are unknown. | ||
276 | """ | ||
277 | unresolved = [] | ||
278 | for target in self.build_names_index: | ||
279 | if re_match_strings(target, dataCache.ignored_dependencies): | ||
280 | continue | ||
281 | if self.build_names_index.index(target) in self.failed_deps: | ||
282 | continue | ||
283 | if not self.have_build_target(target): | ||
284 | unresolved.append(target) | ||
285 | return unresolved | ||
286 | |||
287 | def get_unresolved_run_targets(self, dataCache): | ||
288 | """ | ||
289 | Return a list of runtime targets who's providers | ||
290 | are unknown. | ||
291 | """ | ||
292 | unresolved = [] | ||
293 | for target in self.run_names_index: | ||
294 | if re_match_strings(target, dataCache.ignored_dependencies): | ||
295 | continue | ||
296 | if self.run_names_index.index(target) in self.failed_rdeps: | ||
297 | continue | ||
298 | if not self.have_runtime_target(target): | ||
299 | unresolved.append(target) | ||
300 | return unresolved | ||
301 | |||
302 | def get_provider(self, item): | ||
303 | """ | ||
304 | Return a list of providers of item | ||
305 | """ | ||
306 | targetid = self.getbuild_id(item) | ||
307 | |||
308 | return self.build_targets[targetid] | ||
309 | |||
310 | def get_dependees(self, itemid): | ||
311 | """ | ||
312 | Return a list of targets which depend on item | ||
313 | """ | ||
314 | dependees = [] | ||
315 | for fnid in self.depids: | ||
316 | if itemid in self.depids[fnid]: | ||
317 | dependees.append(fnid) | ||
318 | return dependees | ||
319 | |||
320 | def get_dependees_str(self, item): | ||
321 | """ | ||
322 | Return a list of targets which depend on item as a user readable string | ||
323 | """ | ||
324 | itemid = self.getbuild_id(item) | ||
325 | dependees = [] | ||
326 | for fnid in self.depids: | ||
327 | if itemid in self.depids[fnid]: | ||
328 | dependees.append(self.fn_index[fnid]) | ||
329 | return dependees | ||
330 | |||
331 | def get_rdependees(self, itemid): | ||
332 | """ | ||
333 | Return a list of targets which depend on runtime item | ||
334 | """ | ||
335 | dependees = [] | ||
336 | for fnid in self.rdepids: | ||
337 | if itemid in self.rdepids[fnid]: | ||
338 | dependees.append(fnid) | ||
339 | return dependees | ||
340 | |||
341 | def get_rdependees_str(self, item): | ||
342 | """ | ||
343 | Return a list of targets which depend on runtime item as a user readable string | ||
344 | """ | ||
345 | itemid = self.getrun_id(item) | ||
346 | dependees = [] | ||
347 | for fnid in self.rdepids: | ||
348 | if itemid in self.rdepids[fnid]: | ||
349 | dependees.append(self.fn_index[fnid]) | ||
350 | return dependees | ||
351 | |||
    def add_provider(self, cfgData, dataCache, item):
        """
        Add the providers of build target 'item' to the task data.

        On NoProvider: re-raise when self.abort is set (after logging),
        otherwise drop the unbuildable target and carry on.
        """
        try:
            self.add_provider_internal(cfgData, dataCache, item)
        except bb.providers.NoProvider:
            if self.abort:
                # NOTE(review): the guard tests runtime dependees but the
                # message prints build dependees -- confirm this asymmetry
                # is intentional.
                if self.get_rdependees_str(item):
                    bb.msg.error(bb.msg.domain.Provider, "Nothing PROVIDES '%s' (but '%s' DEPENDS on or otherwise requires it)" % (item, self.get_dependees_str(item)))
                else:
                    bb.msg.error(bb.msg.domain.Provider, "Nothing PROVIDES '%s'" % (item))
                raise
            # Not aborting: forget this target so the build can continue.
            targetid = self.getbuild_id(item)
            self.remove_buildtarget(targetid)

        self.mark_external_target(item)
366 | |||
    def add_provider_internal(self, cfgData, dataCache, item):
        """
        Add the providers of item to the task data
        Mark entries were specifically added externally as against dependencies
        added internally during dependency resolution

        Raises bb.providers.NoProvider when nothing (buildable) provides item.
        """

        # Host-provided (ignored) items need no recipe at all.
        if re_match_strings(item, dataCache.ignored_dependencies):
            return

        if not item in dataCache.providers:
            # No recipe PROVIDES this item: log (with dependees if any),
            # notify the UI via an event, then raise.
            if self.get_rdependees_str(item):
                bb.msg.note(2, bb.msg.domain.Provider, "Nothing PROVIDES '%s' (but '%s' DEPENDS on or otherwise requires it)" % (item, self.get_dependees_str(item)))
            else:
                bb.msg.note(2, bb.msg.domain.Provider, "Nothing PROVIDES '%s'" % (item))
            bb.event.fire(bb.event.NoProvider(item), cfgData)
            raise bb.providers.NoProvider(item)

        # Already resolved earlier -- nothing to do.
        if self.have_build_target(item):
            return

        all_p = dataCache.providers[item]

        eligible, foundUnique = bb.providers.filterProviders(all_p, item, cfgData, dataCache)
        # Discard providers whose files have already failed.
        eligible = [p for p in eligible if not self.getfn_id(p) in self.failed_fnids]

        if not eligible:
            bb.msg.note(2, bb.msg.domain.Provider, "No buildable provider PROVIDES '%s' but '%s' DEPENDS on or otherwise requires it. Enable debugging and see earlier logs to find unbuildable providers." % (item, self.get_dependees_str(item)))
            bb.event.fire(bb.event.NoProvider(item), cfgData)
            raise bb.providers.NoProvider(item)

        # Several candidates with no unique preference: warn once per item.
        if len(eligible) > 1 and foundUnique == False:
            if item not in self.consider_msgs_cache:
                providers_list = []
                for fn in eligible:
                    providers_list.append(dataCache.pkg_fn[fn])
                bb.msg.note(1, bb.msg.domain.Provider, "multiple providers are available for %s (%s);" % (item, ", ".join(providers_list)))
                bb.msg.note(1, bb.msg.domain.Provider, "consider defining PREFERRED_PROVIDER_%s" % item)
                bb.event.fire(bb.event.MultipleProviders(item, providers_list), cfgData)
                self.consider_msgs_cache.append(item)

        # Register every remaining provider and pull in its tasks.
        for fn in eligible:
            fnid = self.getfn_id(fn)
            if fnid in self.failed_fnids:
                continue
            bb.msg.debug(2, bb.msg.domain.Provider, "adding %s to satisfy %s" % (fn, item))
            self.add_build_target(fn, item)
            self.add_tasks(fn, dataCache)


        #item = dataCache.pkg_fn[fn]
418 | |||
419 | def add_rprovider(self, cfgData, dataCache, item): | ||
420 | """ | ||
421 | Add the runtime providers of item to the task data | ||
422 | (takes item names from RDEPENDS/PACKAGES namespace) | ||
423 | """ | ||
424 | |||
425 | if re_match_strings(item, dataCache.ignored_dependencies): | ||
426 | return | ||
427 | |||
428 | if self.have_runtime_target(item): | ||
429 | return | ||
430 | |||
431 | all_p = bb.providers.getRuntimeProviders(dataCache, item) | ||
432 | |||
433 | if not all_p: | ||
434 | bb.msg.error(bb.msg.domain.Provider, "'%s' RDEPENDS/RRECOMMENDS or otherwise requires the runtime entity '%s' but it wasn't found in any PACKAGE or RPROVIDES variables" % (self.get_rdependees_str(item), item)) | ||
435 | bb.event.fire(bb.event.NoProvider(item, runtime=True), cfgData) | ||
436 | raise bb.providers.NoRProvider(item) | ||
437 | |||
438 | eligible, numberPreferred = bb.providers.filterProvidersRunTime(all_p, item, cfgData, dataCache) | ||
439 | eligible = [p for p in eligible if not self.getfn_id(p) in self.failed_fnids] | ||
440 | |||
441 | if not eligible: | ||
442 | bb.msg.error(bb.msg.domain.Provider, "'%s' RDEPENDS/RRECOMMENDS or otherwise requires the runtime entity '%s' but it wasn't found in any PACKAGE or RPROVIDES variables of any buildable targets.\nEnable debugging and see earlier logs to find unbuildable targets." % (self.get_rdependees_str(item), item)) | ||
443 | bb.event.fire(bb.event.NoProvider(item, runtime=True), cfgData) | ||
444 | raise bb.providers.NoRProvider(item) | ||
445 | |||
446 | if len(eligible) > 1 and numberPreferred == 0: | ||
447 | if item not in self.consider_msgs_cache: | ||
448 | providers_list = [] | ||
449 | for fn in eligible: | ||
450 | providers_list.append(dataCache.pkg_fn[fn]) | ||
451 | bb.msg.note(2, bb.msg.domain.Provider, "multiple providers are available for runtime %s (%s);" % (item, ", ".join(providers_list))) | ||
452 | bb.msg.note(2, bb.msg.domain.Provider, "consider defining a PREFERRED_PROVIDER entry to match runtime %s" % item) | ||
453 | bb.event.fire(bb.event.MultipleProviders(item,providers_list, runtime=True), cfgData) | ||
454 | self.consider_msgs_cache.append(item) | ||
455 | |||
456 | if numberPreferred > 1: | ||
457 | if item not in self.consider_msgs_cache: | ||
458 | providers_list = [] | ||
459 | for fn in eligible: | ||
460 | providers_list.append(dataCache.pkg_fn[fn]) | ||
461 | bb.msg.note(2, bb.msg.domain.Provider, "multiple providers are available for runtime %s (top %s entries preferred) (%s);" % (item, numberPreferred, ", ".join(providers_list))) | ||
462 | bb.msg.note(2, bb.msg.domain.Provider, "consider defining only one PREFERRED_PROVIDER entry to match runtime %s" % item) | ||
463 | bb.event.fire(bb.event.MultipleProviders(item,providers_list, runtime=True), cfgData) | ||
464 | self.consider_msgs_cache.append(item) | ||
465 | |||
466 | # run through the list until we find one that we can build | ||
467 | for fn in eligible: | ||
468 | fnid = self.getfn_id(fn) | ||
469 | if fnid in self.failed_fnids: | ||
470 | continue | ||
471 | bb.msg.debug(2, bb.msg.domain.Provider, "adding '%s' to satisfy runtime '%s'" % (fn, item)) | ||
472 | self.add_runtime_target(fn, item) | ||
473 | self.add_tasks(fn, dataCache) | ||
474 | |||
475 | def fail_fnid(self, fnid, missing_list = []): | ||
476 | """ | ||
477 | Mark a file as failed (unbuildable) | ||
478 | Remove any references from build and runtime provider lists | ||
479 | |||
480 | missing_list, A list of missing requirements for this target | ||
481 | """ | ||
482 | if fnid in self.failed_fnids: | ||
483 | return | ||
484 | bb.msg.debug(1, bb.msg.domain.Provider, "File '%s' is unbuildable, removing..." % self.fn_index[fnid]) | ||
485 | self.failed_fnids.append(fnid) | ||
486 | for target in self.build_targets: | ||
487 | if fnid in self.build_targets[target]: | ||
488 | self.build_targets[target].remove(fnid) | ||
489 | if len(self.build_targets[target]) == 0: | ||
490 | self.remove_buildtarget(target, missing_list) | ||
491 | for target in self.run_targets: | ||
492 | if fnid in self.run_targets[target]: | ||
493 | self.run_targets[target].remove(fnid) | ||
494 | if len(self.run_targets[target]) == 0: | ||
495 | self.remove_runtarget(target, missing_list) | ||
496 | |||
497 | def remove_buildtarget(self, targetid, missing_list = []): | ||
498 | """ | ||
499 | Mark a build target as failed (unbuildable) | ||
500 | Trigger removal of any files that have this as a dependency | ||
501 | """ | ||
502 | if not missing_list: | ||
503 | missing_list = [self.build_names_index[targetid]] | ||
504 | else: | ||
505 | missing_list = [self.build_names_index[targetid]] + missing_list | ||
506 | bb.msg.note(2, bb.msg.domain.Provider, "Target '%s' is unbuildable, removing...\nMissing or unbuildable dependency chain was: %s" % (self.build_names_index[targetid], missing_list)) | ||
507 | self.failed_deps.append(targetid) | ||
508 | dependees = self.get_dependees(targetid) | ||
509 | for fnid in dependees: | ||
510 | self.fail_fnid(fnid, missing_list) | ||
511 | for taskid in range(len(self.tasks_idepends)): | ||
512 | idepends = self.tasks_idepends[taskid] | ||
513 | for (idependid, idependtask) in idepends: | ||
514 | if idependid == targetid: | ||
515 | self.fail_fnid(self.tasks_fnid[taskid], missing_list) | ||
516 | |||
517 | if self.abort and targetid in self.external_targets: | ||
518 | bb.msg.error(bb.msg.domain.Provider, "Required build target '%s' has no buildable providers.\nMissing or unbuildable dependency chain was: %s" % (self.build_names_index[targetid], missing_list)) | ||
519 | raise bb.providers.NoProvider | ||
520 | |||
521 | def remove_runtarget(self, targetid, missing_list = []): | ||
522 | """ | ||
523 | Mark a run target as failed (unbuildable) | ||
524 | Trigger removal of any files that have this as a dependency | ||
525 | """ | ||
526 | if not missing_list: | ||
527 | missing_list = [self.run_names_index[targetid]] | ||
528 | else: | ||
529 | missing_list = [self.run_names_index[targetid]] + missing_list | ||
530 | |||
531 | bb.msg.note(1, bb.msg.domain.Provider, "Runtime target '%s' is unbuildable, removing...\nMissing or unbuildable dependency chain was: %s" % (self.run_names_index[targetid], missing_list)) | ||
532 | self.failed_rdeps.append(targetid) | ||
533 | dependees = self.get_rdependees(targetid) | ||
534 | for fnid in dependees: | ||
535 | self.fail_fnid(fnid, missing_list) | ||
536 | |||
537 | def add_unresolved(self, cfgData, dataCache): | ||
538 | """ | ||
539 | Resolve all unresolved build and runtime targets | ||
540 | """ | ||
541 | bb.msg.note(1, bb.msg.domain.TaskData, "Resolving any missing task queue dependencies") | ||
542 | while 1: | ||
543 | added = 0 | ||
544 | for target in self.get_unresolved_build_targets(dataCache): | ||
545 | try: | ||
546 | self.add_provider_internal(cfgData, dataCache, target) | ||
547 | added = added + 1 | ||
548 | except bb.providers.NoProvider: | ||
549 | targetid = self.getbuild_id(target) | ||
550 | if self.abort and targetid in self.external_targets: | ||
551 | if self.get_rdependees_str(target): | ||
552 | bb.msg.error(bb.msg.domain.Provider, "Nothing PROVIDES '%s' (but '%s' DEPENDS on or otherwise requires it)" % (target, self.get_dependees_str(target))) | ||
553 | else: | ||
554 | bb.msg.error(bb.msg.domain.Provider, "Nothing PROVIDES '%s'" % (target)) | ||
555 | raise | ||
556 | self.remove_buildtarget(targetid) | ||
557 | for target in self.get_unresolved_run_targets(dataCache): | ||
558 | try: | ||
559 | self.add_rprovider(cfgData, dataCache, target) | ||
560 | added = added + 1 | ||
561 | except bb.providers.NoRProvider: | ||
562 | self.remove_runtarget(self.getrun_id(target)) | ||
563 | bb.msg.debug(1, bb.msg.domain.TaskData, "Resolved " + str(added) + " extra dependecies") | ||
564 | if added == 0: | ||
565 | break | ||
566 | # self.dump_data() | ||
567 | |||
568 | def dump_data(self): | ||
569 | """ | ||
570 | Dump some debug information on the internal data structures | ||
571 | """ | ||
572 | bb.msg.debug(3, bb.msg.domain.TaskData, "build_names:") | ||
573 | bb.msg.debug(3, bb.msg.domain.TaskData, ", ".join(self.build_names_index)) | ||
574 | |||
575 | bb.msg.debug(3, bb.msg.domain.TaskData, "run_names:") | ||
576 | bb.msg.debug(3, bb.msg.domain.TaskData, ", ".join(self.run_names_index)) | ||
577 | |||
578 | bb.msg.debug(3, bb.msg.domain.TaskData, "build_targets:") | ||
579 | for buildid in range(len(self.build_names_index)): | ||
580 | target = self.build_names_index[buildid] | ||
581 | targets = "None" | ||
582 | if buildid in self.build_targets: | ||
583 | targets = self.build_targets[buildid] | ||
584 | bb.msg.debug(3, bb.msg.domain.TaskData, " (%s)%s: %s" % (buildid, target, targets)) | ||
585 | |||
586 | bb.msg.debug(3, bb.msg.domain.TaskData, "run_targets:") | ||
587 | for runid in range(len(self.run_names_index)): | ||
588 | target = self.run_names_index[runid] | ||
589 | targets = "None" | ||
590 | if runid in self.run_targets: | ||
591 | targets = self.run_targets[runid] | ||
592 | bb.msg.debug(3, bb.msg.domain.TaskData, " (%s)%s: %s" % (runid, target, targets)) | ||
593 | |||
594 | bb.msg.debug(3, bb.msg.domain.TaskData, "tasks:") | ||
595 | for task in range(len(self.tasks_name)): | ||
596 | bb.msg.debug(3, bb.msg.domain.TaskData, " (%s)%s - %s: %s" % ( | ||
597 | task, | ||
598 | self.fn_index[self.tasks_fnid[task]], | ||
599 | self.tasks_name[task], | ||
600 | self.tasks_tdepends[task])) | ||
601 | |||
602 | bb.msg.debug(3, bb.msg.domain.TaskData, "dependency ids (per fn):") | ||
603 | for fnid in self.depids: | ||
604 | bb.msg.debug(3, bb.msg.domain.TaskData, " %s %s: %s" % (fnid, self.fn_index[fnid], self.depids[fnid])) | ||
605 | |||
606 | bb.msg.debug(3, bb.msg.domain.TaskData, "runtime dependency ids (per fn):") | ||
607 | for fnid in self.rdepids: | ||
608 | bb.msg.debug(3, bb.msg.domain.TaskData, " %s %s: %s" % (fnid, self.fn_index[fnid], self.rdepids[fnid])) | ||
609 | |||
610 | |||
diff --git a/bitbake-dev/lib/bb/ui/__init__.py b/bitbake-dev/lib/bb/ui/__init__.py deleted file mode 100644 index c6a377a8e6..0000000000 --- a/bitbake-dev/lib/bb/ui/__init__.py +++ /dev/null | |||
@@ -1,18 +0,0 @@ | |||
1 | # | ||
2 | # BitBake UI Implementation | ||
3 | # | ||
4 | # Copyright (C) 2006-2007 Richard Purdie | ||
5 | # | ||
6 | # This program is free software; you can redistribute it and/or modify | ||
7 | # it under the terms of the GNU General Public License version 2 as | ||
8 | # published by the Free Software Foundation. | ||
9 | # | ||
10 | # This program is distributed in the hope that it will be useful, | ||
11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | # GNU General Public License for more details. | ||
14 | # | ||
15 | # You should have received a copy of the GNU General Public License along | ||
16 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
17 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
diff --git a/bitbake-dev/lib/bb/ui/crumbs/__init__.py b/bitbake-dev/lib/bb/ui/crumbs/__init__.py deleted file mode 100644 index c6a377a8e6..0000000000 --- a/bitbake-dev/lib/bb/ui/crumbs/__init__.py +++ /dev/null | |||
@@ -1,18 +0,0 @@ | |||
1 | # | ||
2 | # BitBake UI Implementation | ||
3 | # | ||
4 | # Copyright (C) 2006-2007 Richard Purdie | ||
5 | # | ||
6 | # This program is free software; you can redistribute it and/or modify | ||
7 | # it under the terms of the GNU General Public License version 2 as | ||
8 | # published by the Free Software Foundation. | ||
9 | # | ||
10 | # This program is distributed in the hope that it will be useful, | ||
11 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
12 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
13 | # GNU General Public License for more details. | ||
14 | # | ||
15 | # You should have received a copy of the GNU General Public License along | ||
16 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
17 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
18 | |||
diff --git a/bitbake-dev/lib/bb/ui/crumbs/buildmanager.py b/bitbake-dev/lib/bb/ui/crumbs/buildmanager.py deleted file mode 100644 index f89e8eefd4..0000000000 --- a/bitbake-dev/lib/bb/ui/crumbs/buildmanager.py +++ /dev/null | |||
@@ -1,457 +0,0 @@ | |||
1 | # | ||
2 | # BitBake Graphical GTK User Interface | ||
3 | # | ||
4 | # Copyright (C) 2008 Intel Corporation | ||
5 | # | ||
6 | # Authored by Rob Bradford <rob@linux.intel.com> | ||
7 | # | ||
8 | # This program is free software; you can redistribute it and/or modify | ||
9 | # it under the terms of the GNU General Public License version 2 as | ||
10 | # published by the Free Software Foundation. | ||
11 | # | ||
12 | # This program is distributed in the hope that it will be useful, | ||
13 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | # GNU General Public License for more details. | ||
16 | # | ||
17 | # You should have received a copy of the GNU General Public License along | ||
18 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
19 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
20 | |||
21 | import gtk | ||
22 | import gobject | ||
23 | import threading | ||
24 | import os | ||
25 | import datetime | ||
26 | import time | ||
27 | |||
28 | class BuildConfiguration: | ||
29 | """ Represents a potential *or* historic *or* concrete build. It | ||
30 | encompasses all the things that we need to tell bitbake to do to make it | ||
31 | build what we want it to build. | ||
32 | |||
33 | It also stored the metadata URL and the set of possible machines (and the | ||
34 | distros / images / uris for these. Apart from the metdata URL these are | ||
35 | not serialised to file (since they may be transient). In some ways this | ||
36 | functionality might be shifted to the loader class.""" | ||
37 | |||
38 | def __init__ (self): | ||
39 | self.metadata_url = None | ||
40 | |||
41 | # Tuple of (distros, image, urls) | ||
42 | self.machine_options = {} | ||
43 | |||
44 | self.machine = None | ||
45 | self.distro = None | ||
46 | self.image = None | ||
47 | self.urls = [] | ||
48 | self.extra_urls = [] | ||
49 | self.extra_pkgs = [] | ||
50 | |||
51 | def get_machines_model (self): | ||
52 | model = gtk.ListStore (gobject.TYPE_STRING) | ||
53 | for machine in self.machine_options.keys(): | ||
54 | model.append ([machine]) | ||
55 | |||
56 | return model | ||
57 | |||
58 | def get_distro_and_images_models (self, machine): | ||
59 | distro_model = gtk.ListStore (gobject.TYPE_STRING) | ||
60 | |||
61 | for distro in self.machine_options[machine][0]: | ||
62 | distro_model.append ([distro]) | ||
63 | |||
64 | image_model = gtk.ListStore (gobject.TYPE_STRING) | ||
65 | |||
66 | for image in self.machine_options[machine][1]: | ||
67 | image_model.append ([image]) | ||
68 | |||
69 | return (distro_model, image_model) | ||
70 | |||
71 | def get_repos (self): | ||
72 | self.urls = self.machine_options[self.machine][2] | ||
73 | return self.urls | ||
74 | |||
75 | # It might be a lot lot better if we stored these in like, bitbake conf | ||
76 | # file format. | ||
77 | @staticmethod | ||
78 | def load_from_file (filename): | ||
79 | f = open (filename, "r") | ||
80 | |||
81 | conf = BuildConfiguration() | ||
82 | for line in f.readlines(): | ||
83 | data = line.split (";")[1] | ||
84 | if (line.startswith ("metadata-url;")): | ||
85 | conf.metadata_url = data.strip() | ||
86 | continue | ||
87 | if (line.startswith ("url;")): | ||
88 | conf.urls += [data.strip()] | ||
89 | continue | ||
90 | if (line.startswith ("extra-url;")): | ||
91 | conf.extra_urls += [data.strip()] | ||
92 | continue | ||
93 | if (line.startswith ("machine;")): | ||
94 | conf.machine = data.strip() | ||
95 | continue | ||
96 | if (line.startswith ("distribution;")): | ||
97 | conf.distro = data.strip() | ||
98 | continue | ||
99 | if (line.startswith ("image;")): | ||
100 | conf.image = data.strip() | ||
101 | continue | ||
102 | |||
103 | f.close () | ||
104 | return conf | ||
105 | |||
106 | # Serialise to a file. This is part of the build process and we use this | ||
107 | # to be able to repeat a given build (using the same set of parameters) | ||
108 | # but also so that we can include the details of the image / machine / | ||
109 | # distro in the build manager tree view. | ||
110 | def write_to_file (self, filename): | ||
111 | f = open (filename, "w") | ||
112 | |||
113 | lines = [] | ||
114 | |||
115 | if (self.metadata_url): | ||
116 | lines += ["metadata-url;%s\n" % (self.metadata_url)] | ||
117 | |||
118 | for url in self.urls: | ||
119 | lines += ["url;%s\n" % (url)] | ||
120 | |||
121 | for url in self.extra_urls: | ||
122 | lines += ["extra-url;%s\n" % (url)] | ||
123 | |||
124 | if (self.machine): | ||
125 | lines += ["machine;%s\n" % (self.machine)] | ||
126 | |||
127 | if (self.distro): | ||
128 | lines += ["distribution;%s\n" % (self.distro)] | ||
129 | |||
130 | if (self.image): | ||
131 | lines += ["image;%s\n" % (self.image)] | ||
132 | |||
133 | f.writelines (lines) | ||
134 | f.close () | ||
135 | |||
136 | class BuildResult(gobject.GObject): | ||
137 | """ Represents an historic build. Perhaps not successful. But it includes | ||
138 | things such as the files that are in the directory (the output from the | ||
139 | build) as well as a deserialised BuildConfiguration file that is stored in | ||
140 | ".conf" in the directory for the build. | ||
141 | |||
142 | This is GObject so that it can be included in the TreeStore.""" | ||
143 | |||
144 | (STATE_COMPLETE, STATE_FAILED, STATE_ONGOING) = \ | ||
145 | (0, 1, 2) | ||
146 | |||
147 | def __init__ (self, parent, identifier): | ||
148 | gobject.GObject.__init__ (self) | ||
149 | self.date = None | ||
150 | |||
151 | self.files = [] | ||
152 | self.status = None | ||
153 | self.identifier = identifier | ||
154 | self.path = os.path.join (parent, identifier) | ||
155 | |||
156 | # Extract the date, since the directory name is of the | ||
157 | # format build-<year><month><day>-<ordinal> we can easily | ||
158 | # pull it out. | ||
159 | # TODO: Better to stat a file? | ||
160 | (_ , date, revision) = identifier.split ("-") | ||
161 | print date | ||
162 | |||
163 | year = int (date[0:4]) | ||
164 | month = int (date[4:6]) | ||
165 | day = int (date[6:8]) | ||
166 | |||
167 | self.date = datetime.date (year, month, day) | ||
168 | |||
169 | self.conf = None | ||
170 | |||
171 | # By default builds are STATE_FAILED unless we find a "complete" file | ||
172 | # in which case they are STATE_COMPLETE | ||
173 | self.state = BuildResult.STATE_FAILED | ||
174 | for file in os.listdir (self.path): | ||
175 | if (file.startswith (".conf")): | ||
176 | conffile = os.path.join (self.path, file) | ||
177 | self.conf = BuildConfiguration.load_from_file (conffile) | ||
178 | elif (file.startswith ("complete")): | ||
179 | self.state = BuildResult.STATE_COMPLETE | ||
180 | else: | ||
181 | self.add_file (file) | ||
182 | |||
183 | def add_file (self, file): | ||
184 | # Just add the file for now. Don't care about the type. | ||
185 | self.files += [(file, None)] | ||
186 | |||
187 | class BuildManagerModel (gtk.TreeStore): | ||
188 | """ Model for the BuildManagerTreeView. This derives from gtk.TreeStore | ||
189 | but it abstracts nicely what the columns mean and the setup of the columns | ||
190 | in the model. """ | ||
191 | |||
192 | (COL_IDENT, COL_DESC, COL_MACHINE, COL_DISTRO, COL_BUILD_RESULT, COL_DATE, COL_STATE) = \ | ||
193 | (0, 1, 2, 3, 4, 5, 6) | ||
194 | |||
195 | def __init__ (self): | ||
196 | gtk.TreeStore.__init__ (self, | ||
197 | gobject.TYPE_STRING, | ||
198 | gobject.TYPE_STRING, | ||
199 | gobject.TYPE_STRING, | ||
200 | gobject.TYPE_STRING, | ||
201 | gobject.TYPE_OBJECT, | ||
202 | gobject.TYPE_INT64, | ||
203 | gobject.TYPE_INT) | ||
204 | |||
205 | class BuildManager (gobject.GObject): | ||
206 | """ This class manages the historic builds that have been found in the | ||
207 | "results" directory but is also used for starting a new build.""" | ||
208 | |||
209 | __gsignals__ = { | ||
210 | 'population-finished' : (gobject.SIGNAL_RUN_LAST, | ||
211 | gobject.TYPE_NONE, | ||
212 | ()), | ||
213 | 'populate-error' : (gobject.SIGNAL_RUN_LAST, | ||
214 | gobject.TYPE_NONE, | ||
215 | ()) | ||
216 | } | ||
217 | |||
218 | def update_build_result (self, result, iter): | ||
219 | # Convert the date into something we can sort by. | ||
220 | date = long (time.mktime (result.date.timetuple())) | ||
221 | |||
222 | # Add a top level entry for the build | ||
223 | |||
224 | self.model.set (iter, | ||
225 | BuildManagerModel.COL_IDENT, result.identifier, | ||
226 | BuildManagerModel.COL_DESC, result.conf.image, | ||
227 | BuildManagerModel.COL_MACHINE, result.conf.machine, | ||
228 | BuildManagerModel.COL_DISTRO, result.conf.distro, | ||
229 | BuildManagerModel.COL_BUILD_RESULT, result, | ||
230 | BuildManagerModel.COL_DATE, date, | ||
231 | BuildManagerModel.COL_STATE, result.state) | ||
232 | |||
233 | # And then we use the files in the directory as the children for the | ||
234 | # top level iter. | ||
235 | for file in result.files: | ||
236 | self.model.append (iter, (None, file[0], None, None, None, date, -1)) | ||
237 | |||
238 | # This function is called as an idle by the BuildManagerPopulaterThread | ||
239 | def add_build_result (self, result): | ||
240 | gtk.gdk.threads_enter() | ||
241 | self.known_builds += [result] | ||
242 | |||
243 | self.update_build_result (result, self.model.append (None)) | ||
244 | |||
245 | gtk.gdk.threads_leave() | ||
246 | |||
247 | def notify_build_finished (self): | ||
248 | # This is a bit of a hack. If we have a running build running then we | ||
249 | # will have a row in the model in STATE_ONGOING. Find it and make it | ||
250 | # as if it was a proper historic build (well, it is completed now....) | ||
251 | |||
252 | # We need to use the iters here rather than the Python iterator | ||
253 | # interface to the model since we need to pass it into | ||
254 | # update_build_result | ||
255 | |||
256 | iter = self.model.get_iter_first() | ||
257 | |||
258 | while (iter): | ||
259 | (ident, state) = self.model.get(iter, | ||
260 | BuildManagerModel.COL_IDENT, | ||
261 | BuildManagerModel.COL_STATE) | ||
262 | |||
263 | if state == BuildResult.STATE_ONGOING: | ||
264 | result = BuildResult (self.results_directory, ident) | ||
265 | self.update_build_result (result, iter) | ||
266 | iter = self.model.iter_next(iter) | ||
267 | |||
268 | def notify_build_succeeded (self): | ||
269 | # Write the "complete" file so that when we create the BuildResult | ||
270 | # object we put into the model | ||
271 | |||
272 | complete_file_path = os.path.join (self.cur_build_directory, "complete") | ||
273 | f = file (complete_file_path, "w") | ||
274 | f.close() | ||
275 | self.notify_build_finished() | ||
276 | |||
277 | def notify_build_failed (self): | ||
278 | # Without a "complete" file then this will mark the build as failed: | ||
279 | self.notify_build_finished() | ||
280 | |||
281 | # This function is called as an idle | ||
282 | def emit_population_finished_signal (self): | ||
283 | gtk.gdk.threads_enter() | ||
284 | self.emit ("population-finished") | ||
285 | gtk.gdk.threads_leave() | ||
286 | |||
287 | class BuildManagerPopulaterThread (threading.Thread): | ||
288 | def __init__ (self, manager, directory): | ||
289 | threading.Thread.__init__ (self) | ||
290 | self.manager = manager | ||
291 | self.directory = directory | ||
292 | |||
293 | def run (self): | ||
294 | # For each of the "build-<...>" directories .. | ||
295 | |||
296 | if os.path.exists (self.directory): | ||
297 | for directory in os.listdir (self.directory): | ||
298 | |||
299 | if not directory.startswith ("build-"): | ||
300 | continue | ||
301 | |||
302 | build_result = BuildResult (self.directory, directory) | ||
303 | self.manager.add_build_result (build_result) | ||
304 | |||
305 | gobject.idle_add (BuildManager.emit_population_finished_signal, | ||
306 | self.manager) | ||
307 | |||
308 | def __init__ (self, server, results_directory): | ||
309 | gobject.GObject.__init__ (self) | ||
310 | |||
311 | # The builds that we've found from walking the result directory | ||
312 | self.known_builds = [] | ||
313 | |||
314 | # Save out the bitbake server, we need this for issuing commands to | ||
315 | # the cooker: | ||
316 | self.server = server | ||
317 | |||
318 | # The TreeStore that we use | ||
319 | self.model = BuildManagerModel () | ||
320 | |||
321 | # The results directory is where we create (and look for) the | ||
322 | # build-<xyz>-<n> directories. We need to populate ourselves from | ||
323 | # directory | ||
324 | self.results_directory = results_directory | ||
325 | self.populate_from_directory (self.results_directory) | ||
326 | |||
327 | def populate_from_directory (self, directory): | ||
328 | thread = BuildManager.BuildManagerPopulaterThread (self, directory) | ||
329 | thread.start() | ||
330 | |||
331 | # Come up with the name for the next build ident by combining "build-" | ||
332 | # with the date formatted as yyyymmdd and then an ordinal. We do this by | ||
333 | # an optimistic algorithm incrementing the ordinal if we find that it | ||
334 | # already exists. | ||
335 | def get_next_build_ident (self): | ||
336 | today = datetime.date.today () | ||
337 | datestr = str (today.year) + str (today.month) + str (today.day) | ||
338 | |||
339 | revision = 0 | ||
340 | test_name = "build-%s-%d" % (datestr, revision) | ||
341 | test_path = os.path.join (self.results_directory, test_name) | ||
342 | |||
343 | while (os.path.exists (test_path)): | ||
344 | revision += 1 | ||
345 | test_name = "build-%s-%d" % (datestr, revision) | ||
346 | test_path = os.path.join (self.results_directory, test_name) | ||
347 | |||
348 | return test_name | ||
349 | |||
350 | # Take a BuildConfiguration and then try and build it based on the | ||
351 | # parameters of that configuration. S | ||
352 | def do_build (self, conf): | ||
353 | server = self.server | ||
354 | |||
355 | # Work out the build directory. Note we actually create the | ||
356 | # directories here since we need to write the ".conf" file. Otherwise | ||
357 | # we could have relied on bitbake's builder thread to actually make | ||
358 | # the directories as it proceeds with the build. | ||
359 | ident = self.get_next_build_ident () | ||
360 | build_directory = os.path.join (self.results_directory, | ||
361 | ident) | ||
362 | self.cur_build_directory = build_directory | ||
363 | os.makedirs (build_directory) | ||
364 | |||
365 | conffile = os.path.join (build_directory, ".conf") | ||
366 | conf.write_to_file (conffile) | ||
367 | |||
368 | # Add a row to the model representing this ongoing build. It's kinda a | ||
369 | # fake entry. If this build completes or fails then this gets updated | ||
370 | # with the real stuff like the historic builds | ||
371 | date = long (time.time()) | ||
372 | self.model.append (None, (ident, conf.image, conf.machine, conf.distro, | ||
373 | None, date, BuildResult.STATE_ONGOING)) | ||
374 | try: | ||
375 | server.runCommand(["setVariable", "BUILD_IMAGES_FROM_FEEDS", 1]) | ||
376 | server.runCommand(["setVariable", "MACHINE", conf.machine]) | ||
377 | server.runCommand(["setVariable", "DISTRO", conf.distro]) | ||
378 | server.runCommand(["setVariable", "PACKAGE_CLASSES", "package_ipk"]) | ||
379 | server.runCommand(["setVariable", "BBFILES", \ | ||
380 | """${OEROOT}/meta/packages/*/*.bb ${OEROOT}/meta-moblin/packages/*/*.bb"""]) | ||
381 | server.runCommand(["setVariable", "TMPDIR", "${OEROOT}/build/tmp"]) | ||
382 | server.runCommand(["setVariable", "IPK_FEED_URIS", \ | ||
383 | " ".join(conf.get_repos())]) | ||
384 | server.runCommand(["setVariable", "DEPLOY_DIR_IMAGE", | ||
385 | build_directory]) | ||
386 | server.runCommand(["buildTargets", [conf.image], "rootfs"]) | ||
387 | |||
388 | except Exception, e: | ||
389 | print e | ||
390 | |||
391 | class BuildManagerTreeView (gtk.TreeView): | ||
392 | """ The tree view for the build manager. This shows the historic builds | ||
393 | and so forth. """ | ||
394 | |||
395 | # We use this function to control what goes in the cell since we store | ||
396 | # the date in the model as seconds since the epoch (for sorting) and so we | ||
397 | # need to make it human readable. | ||
398 | def date_format_custom_cell_data_func (self, col, cell, model, iter): | ||
399 | date = model.get (iter, BuildManagerModel.COL_DATE)[0] | ||
400 | datestr = time.strftime("%A %d %B %Y", time.localtime(date)) | ||
401 | cell.set_property ("text", datestr) | ||
402 | |||
403 | # This format function controls what goes in the cell. We use this to map | ||
404 | # the integer state to a string and also to colourise the text | ||
405 | def state_format_custom_cell_data_fun (self, col, cell, model, iter): | ||
406 | state = model.get (iter, BuildManagerModel.COL_STATE)[0] | ||
407 | |||
408 | if (state == BuildResult.STATE_ONGOING): | ||
409 | cell.set_property ("text", "Active") | ||
410 | cell.set_property ("foreground", "#000000") | ||
411 | elif (state == BuildResult.STATE_FAILED): | ||
412 | cell.set_property ("text", "Failed") | ||
413 | cell.set_property ("foreground", "#ff0000") | ||
414 | elif (state == BuildResult.STATE_COMPLETE): | ||
415 | cell.set_property ("text", "Complete") | ||
416 | cell.set_property ("foreground", "#00ff00") | ||
417 | else: | ||
418 | cell.set_property ("text", "") | ||
419 | |||
420 | def __init__ (self): | ||
421 | gtk.TreeView.__init__(self) | ||
422 | |||
423 | # Misc descriptiony thing | ||
424 | renderer = gtk.CellRendererText () | ||
425 | col = gtk.TreeViewColumn (None, renderer, | ||
426 | text=BuildManagerModel.COL_DESC) | ||
427 | self.append_column (col) | ||
428 | |||
429 | # Machine | ||
430 | renderer = gtk.CellRendererText () | ||
431 | col = gtk.TreeViewColumn ("Machine", renderer, | ||
432 | text=BuildManagerModel.COL_MACHINE) | ||
433 | self.append_column (col) | ||
434 | |||
435 | # distro | ||
436 | renderer = gtk.CellRendererText () | ||
437 | col = gtk.TreeViewColumn ("Distribution", renderer, | ||
438 | text=BuildManagerModel.COL_DISTRO) | ||
439 | self.append_column (col) | ||
440 | |||
441 | # date (using a custom function for formatting the cell contents it | ||
442 | # takes epoch -> human readable string) | ||
443 | renderer = gtk.CellRendererText () | ||
444 | col = gtk.TreeViewColumn ("Date", renderer, | ||
445 | text=BuildManagerModel.COL_DATE) | ||
446 | self.append_column (col) | ||
447 | col.set_cell_data_func (renderer, | ||
448 | self.date_format_custom_cell_data_func) | ||
449 | |||
450 | # For status. | ||
451 | renderer = gtk.CellRendererText () | ||
452 | col = gtk.TreeViewColumn ("Status", renderer, | ||
453 | text = BuildManagerModel.COL_STATE) | ||
454 | self.append_column (col) | ||
455 | col.set_cell_data_func (renderer, | ||
456 | self.state_format_custom_cell_data_fun) | ||
457 | |||
diff --git a/bitbake-dev/lib/bb/ui/crumbs/puccho.glade b/bitbake-dev/lib/bb/ui/crumbs/puccho.glade deleted file mode 100644 index d7553a6e14..0000000000 --- a/bitbake-dev/lib/bb/ui/crumbs/puccho.glade +++ /dev/null | |||
@@ -1,606 +0,0 @@ | |||
1 | <?xml version="1.0" encoding="UTF-8" standalone="no"?> | ||
2 | <!DOCTYPE glade-interface SYSTEM "glade-2.0.dtd"> | ||
3 | <!--Generated with glade3 3.4.5 on Mon Nov 10 12:24:12 2008 --> | ||
4 | <glade-interface> | ||
5 | <widget class="GtkDialog" id="build_dialog"> | ||
6 | <property name="title" translatable="yes">Start a build</property> | ||
7 | <property name="window_position">GTK_WIN_POS_CENTER_ON_PARENT</property> | ||
8 | <property name="type_hint">GDK_WINDOW_TYPE_HINT_DIALOG</property> | ||
9 | <property name="has_separator">False</property> | ||
10 | <child internal-child="vbox"> | ||
11 | <widget class="GtkVBox" id="dialog-vbox1"> | ||
12 | <property name="visible">True</property> | ||
13 | <property name="spacing">2</property> | ||
14 | <child> | ||
15 | <widget class="GtkTable" id="build_table"> | ||
16 | <property name="visible">True</property> | ||
17 | <property name="border_width">6</property> | ||
18 | <property name="n_rows">7</property> | ||
19 | <property name="n_columns">3</property> | ||
20 | <property name="column_spacing">5</property> | ||
21 | <property name="row_spacing">6</property> | ||
22 | <child> | ||
23 | <widget class="GtkAlignment" id="status_alignment"> | ||
24 | <property name="visible">True</property> | ||
25 | <property name="left_padding">12</property> | ||
26 | <child> | ||
27 | <widget class="GtkHBox" id="status_hbox"> | ||
28 | <property name="spacing">6</property> | ||
29 | <child> | ||
30 | <widget class="GtkImage" id="status_image"> | ||
31 | <property name="visible">True</property> | ||
32 | <property name="no_show_all">True</property> | ||
33 | <property name="xalign">0</property> | ||
34 | <property name="stock">gtk-dialog-error</property> | ||
35 | </widget> | ||
36 | <packing> | ||
37 | <property name="expand">False</property> | ||
38 | <property name="fill">False</property> | ||
39 | </packing> | ||
40 | </child> | ||
41 | <child> | ||
42 | <widget class="GtkLabel" id="status_label"> | ||
43 | <property name="visible">True</property> | ||
44 | <property name="xalign">0</property> | ||
45 | <property name="label" translatable="yes">If you see this text something is wrong...</property> | ||
46 | <property name="use_markup">True</property> | ||
47 | <property name="use_underline">True</property> | ||
48 | </widget> | ||
49 | <packing> | ||
50 | <property name="position">1</property> | ||
51 | </packing> | ||
52 | </child> | ||
53 | </widget> | ||
54 | </child> | ||
55 | </widget> | ||
56 | <packing> | ||
57 | <property name="right_attach">3</property> | ||
58 | <property name="top_attach">2</property> | ||
59 | <property name="bottom_attach">3</property> | ||
60 | </packing> | ||
61 | </child> | ||
62 | <child> | ||
63 | <widget class="GtkLabel" id="label2"> | ||
64 | <property name="visible">True</property> | ||
65 | <property name="xalign">0</property> | ||
66 | <property name="label" translatable="yes"><b>Build configuration</b></property> | ||
67 | <property name="use_markup">True</property> | ||
68 | </widget> | ||
69 | <packing> | ||
70 | <property name="right_attach">3</property> | ||
71 | <property name="top_attach">3</property> | ||
72 | <property name="bottom_attach">4</property> | ||
73 | <property name="y_options"></property> | ||
74 | </packing> | ||
75 | </child> | ||
76 | <child> | ||
77 | <widget class="GtkComboBox" id="image_combo"> | ||
78 | <property name="visible">True</property> | ||
79 | <property name="sensitive">False</property> | ||
80 | </widget> | ||
81 | <packing> | ||
82 | <property name="left_attach">1</property> | ||
83 | <property name="right_attach">2</property> | ||
84 | <property name="top_attach">6</property> | ||
85 | <property name="bottom_attach">7</property> | ||
86 | <property name="y_options"></property> | ||
87 | </packing> | ||
88 | </child> | ||
89 | <child> | ||
90 | <widget class="GtkLabel" id="image_label"> | ||
91 | <property name="visible">True</property> | ||
92 | <property name="sensitive">False</property> | ||
93 | <property name="xalign">0</property> | ||
94 | <property name="xpad">12</property> | ||
95 | <property name="label" translatable="yes">Image:</property> | ||
96 | </widget> | ||
97 | <packing> | ||
98 | <property name="top_attach">6</property> | ||
99 | <property name="bottom_attach">7</property> | ||
100 | <property name="y_options"></property> | ||
101 | </packing> | ||
102 | </child> | ||
103 | <child> | ||
104 | <widget class="GtkComboBox" id="distribution_combo"> | ||
105 | <property name="visible">True</property> | ||
106 | <property name="sensitive">False</property> | ||
107 | </widget> | ||
108 | <packing> | ||
109 | <property name="left_attach">1</property> | ||
110 | <property name="right_attach">2</property> | ||
111 | <property name="top_attach">5</property> | ||
112 | <property name="bottom_attach">6</property> | ||
113 | <property name="y_options"></property> | ||
114 | </packing> | ||
115 | </child> | ||
116 | <child> | ||
117 | <widget class="GtkLabel" id="distribution_label"> | ||
118 | <property name="visible">True</property> | ||
119 | <property name="sensitive">False</property> | ||
120 | <property name="xalign">0</property> | ||
121 | <property name="xpad">12</property> | ||
122 | <property name="label" translatable="yes">Distribution:</property> | ||
123 | </widget> | ||
124 | <packing> | ||
125 | <property name="top_attach">5</property> | ||
126 | <property name="bottom_attach">6</property> | ||
127 | <property name="y_options"></property> | ||
128 | </packing> | ||
129 | </child> | ||
130 | <child> | ||
131 | <widget class="GtkComboBox" id="machine_combo"> | ||
132 | <property name="visible">True</property> | ||
133 | <property name="sensitive">False</property> | ||
134 | </widget> | ||
135 | <packing> | ||
136 | <property name="left_attach">1</property> | ||
137 | <property name="right_attach">2</property> | ||
138 | <property name="top_attach">4</property> | ||
139 | <property name="bottom_attach">5</property> | ||
140 | <property name="y_options"></property> | ||
141 | </packing> | ||
142 | </child> | ||
143 | <child> | ||
144 | <widget class="GtkLabel" id="machine_label"> | ||
145 | <property name="visible">True</property> | ||
146 | <property name="sensitive">False</property> | ||
147 | <property name="xalign">0</property> | ||
148 | <property name="xpad">12</property> | ||
149 | <property name="label" translatable="yes">Machine:</property> | ||
150 | </widget> | ||
151 | <packing> | ||
152 | <property name="top_attach">4</property> | ||
153 | <property name="bottom_attach">5</property> | ||
154 | <property name="y_options"></property> | ||
155 | </packing> | ||
156 | </child> | ||
157 | <child> | ||
158 | <widget class="GtkButton" id="refresh_button"> | ||
159 | <property name="visible">True</property> | ||
160 | <property name="sensitive">False</property> | ||
161 | <property name="can_focus">True</property> | ||
162 | <property name="receives_default">True</property> | ||
163 | <property name="label" translatable="yes">gtk-refresh</property> | ||
164 | <property name="use_stock">True</property> | ||
165 | <property name="response_id">0</property> | ||
166 | </widget> | ||
167 | <packing> | ||
168 | <property name="left_attach">2</property> | ||
169 | <property name="right_attach">3</property> | ||
170 | <property name="top_attach">1</property> | ||
171 | <property name="bottom_attach">2</property> | ||
172 | <property name="y_options"></property> | ||
173 | </packing> | ||
174 | </child> | ||
175 | <child> | ||
176 | <widget class="GtkEntry" id="location_entry"> | ||
177 | <property name="visible">True</property> | ||
178 | <property name="can_focus">True</property> | ||
179 | <property name="width_chars">32</property> | ||
180 | </widget> | ||
181 | <packing> | ||
182 | <property name="left_attach">1</property> | ||
183 | <property name="right_attach">2</property> | ||
184 | <property name="top_attach">1</property> | ||
185 | <property name="bottom_attach">2</property> | ||
186 | <property name="y_options"></property> | ||
187 | </packing> | ||
188 | </child> | ||
189 | <child> | ||
190 | <widget class="GtkLabel" id="label3"> | ||
191 | <property name="visible">True</property> | ||
192 | <property name="xalign">0</property> | ||
193 | <property name="xpad">12</property> | ||
194 | <property name="label" translatable="yes">Location:</property> | ||
195 | </widget> | ||
196 | <packing> | ||
197 | <property name="top_attach">1</property> | ||
198 | <property name="bottom_attach">2</property> | ||
199 | <property name="y_options"></property> | ||
200 | </packing> | ||
201 | </child> | ||
202 | <child> | ||
203 | <widget class="GtkLabel" id="label1"> | ||
204 | <property name="visible">True</property> | ||
205 | <property name="xalign">0</property> | ||
206 | <property name="label" translatable="yes"><b>Repository</b></property> | ||
207 | <property name="use_markup">True</property> | ||
208 | </widget> | ||
209 | <packing> | ||
210 | <property name="right_attach">3</property> | ||
211 | <property name="y_options"></property> | ||
212 | </packing> | ||
213 | </child> | ||
214 | <child> | ||
215 | <widget class="GtkAlignment" id="alignment1"> | ||
216 | <property name="visible">True</property> | ||
217 | <child> | ||
218 | <placeholder/> | ||
219 | </child> | ||
220 | </widget> | ||
221 | <packing> | ||
222 | <property name="left_attach">2</property> | ||
223 | <property name="right_attach">3</property> | ||
224 | <property name="top_attach">4</property> | ||
225 | <property name="bottom_attach">5</property> | ||
226 | <property name="y_options"></property> | ||
227 | </packing> | ||
228 | </child> | ||
229 | <child> | ||
230 | <widget class="GtkAlignment" id="alignment2"> | ||
231 | <property name="visible">True</property> | ||
232 | <child> | ||
233 | <placeholder/> | ||
234 | </child> | ||
235 | </widget> | ||
236 | <packing> | ||
237 | <property name="left_attach">2</property> | ||
238 | <property name="right_attach">3</property> | ||
239 | <property name="top_attach">5</property> | ||
240 | <property name="bottom_attach">6</property> | ||
241 | <property name="y_options"></property> | ||
242 | </packing> | ||
243 | </child> | ||
244 | <child> | ||
245 | <widget class="GtkAlignment" id="alignment3"> | ||
246 | <property name="visible">True</property> | ||
247 | <child> | ||
248 | <placeholder/> | ||
249 | </child> | ||
250 | </widget> | ||
251 | <packing> | ||
252 | <property name="left_attach">2</property> | ||
253 | <property name="right_attach">3</property> | ||
254 | <property name="top_attach">6</property> | ||
255 | <property name="bottom_attach">7</property> | ||
256 | <property name="y_options"></property> | ||
257 | </packing> | ||
258 | </child> | ||
259 | </widget> | ||
260 | <packing> | ||
261 | <property name="position">1</property> | ||
262 | </packing> | ||
263 | </child> | ||
264 | <child internal-child="action_area"> | ||
265 | <widget class="GtkHButtonBox" id="dialog-action_area1"> | ||
266 | <property name="visible">True</property> | ||
267 | <property name="layout_style">GTK_BUTTONBOX_END</property> | ||
268 | <child> | ||
269 | <placeholder/> | ||
270 | </child> | ||
271 | <child> | ||
272 | <placeholder/> | ||
273 | </child> | ||
274 | <child> | ||
275 | <placeholder/> | ||
276 | </child> | ||
277 | </widget> | ||
278 | <packing> | ||
279 | <property name="expand">False</property> | ||
280 | <property name="pack_type">GTK_PACK_END</property> | ||
281 | </packing> | ||
282 | </child> | ||
283 | </widget> | ||
284 | </child> | ||
285 | </widget> | ||
286 | <widget class="GtkDialog" id="dialog2"> | ||
287 | <property name="window_position">GTK_WIN_POS_CENTER_ON_PARENT</property> | ||
288 | <property name="type_hint">GDK_WINDOW_TYPE_HINT_DIALOG</property> | ||
289 | <property name="has_separator">False</property> | ||
290 | <child internal-child="vbox"> | ||
291 | <widget class="GtkVBox" id="dialog-vbox2"> | ||
292 | <property name="visible">True</property> | ||
293 | <property name="spacing">2</property> | ||
294 | <child> | ||
295 | <widget class="GtkTable" id="table2"> | ||
296 | <property name="visible">True</property> | ||
297 | <property name="border_width">6</property> | ||
298 | <property name="n_rows">7</property> | ||
299 | <property name="n_columns">3</property> | ||
300 | <property name="column_spacing">6</property> | ||
301 | <property name="row_spacing">6</property> | ||
302 | <child> | ||
303 | <widget class="GtkLabel" id="label7"> | ||
304 | <property name="visible">True</property> | ||
305 | <property name="xalign">0</property> | ||
306 | <property name="label" translatable="yes"><b>Repositories</b></property> | ||
307 | <property name="use_markup">True</property> | ||
308 | </widget> | ||
309 | <packing> | ||
310 | <property name="right_attach">3</property> | ||
311 | <property name="y_options"></property> | ||
312 | </packing> | ||
313 | </child> | ||
314 | <child> | ||
315 | <widget class="GtkAlignment" id="alignment4"> | ||
316 | <property name="visible">True</property> | ||
317 | <property name="xalign">0</property> | ||
318 | <property name="left_padding">12</property> | ||
319 | <child> | ||
320 | <widget class="GtkScrolledWindow" id="scrolledwindow1"> | ||
321 | <property name="visible">True</property> | ||
322 | <property name="can_focus">True</property> | ||
323 | <property name="hscrollbar_policy">GTK_POLICY_AUTOMATIC</property> | ||
324 | <property name="vscrollbar_policy">GTK_POLICY_AUTOMATIC</property> | ||
325 | <child> | ||
326 | <widget class="GtkTreeView" id="treeview1"> | ||
327 | <property name="visible">True</property> | ||
328 | <property name="can_focus">True</property> | ||
329 | <property name="headers_clickable">True</property> | ||
330 | </widget> | ||
331 | </child> | ||
332 | </widget> | ||
333 | </child> | ||
334 | </widget> | ||
335 | <packing> | ||
336 | <property name="right_attach">3</property> | ||
337 | <property name="top_attach">2</property> | ||
338 | <property name="bottom_attach">3</property> | ||
339 | <property name="y_options"></property> | ||
340 | </packing> | ||
341 | </child> | ||
342 | <child> | ||
343 | <widget class="GtkEntry" id="entry1"> | ||
344 | <property name="visible">True</property> | ||
345 | <property name="can_focus">True</property> | ||
346 | </widget> | ||
347 | <packing> | ||
348 | <property name="left_attach">1</property> | ||
349 | <property name="right_attach">3</property> | ||
350 | <property name="top_attach">1</property> | ||
351 | <property name="bottom_attach">2</property> | ||
352 | <property name="y_options"></property> | ||
353 | </packing> | ||
354 | </child> | ||
355 | <child> | ||
356 | <widget class="GtkLabel" id="label9"> | ||
357 | <property name="visible">True</property> | ||
358 | <property name="xalign">0</property> | ||
359 | <property name="label" translatable="yes"><b>Additional packages</b></property> | ||
360 | <property name="use_markup">True</property> | ||
361 | </widget> | ||
362 | <packing> | ||
363 | <property name="right_attach">3</property> | ||
364 | <property name="top_attach">4</property> | ||
365 | <property name="bottom_attach">5</property> | ||
366 | <property name="y_options"></property> | ||
367 | </packing> | ||
368 | </child> | ||
369 | <child> | ||
370 | <widget class="GtkAlignment" id="alignment6"> | ||
371 | <property name="visible">True</property> | ||
372 | <property name="xalign">0</property> | ||
373 | <property name="xscale">0</property> | ||
374 | <child> | ||
375 | <widget class="GtkLabel" id="label8"> | ||
376 | <property name="visible">True</property> | ||
377 | <property name="xalign">0</property> | ||
378 | <property name="yalign">0</property> | ||
379 | <property name="xpad">12</property> | ||
380 | <property name="label" translatable="yes">Location: </property> | ||
381 | </widget> | ||
382 | </child> | ||
383 | </widget> | ||
384 | <packing> | ||
385 | <property name="top_attach">1</property> | ||
386 | <property name="bottom_attach">2</property> | ||
387 | <property name="y_options"></property> | ||
388 | </packing> | ||
389 | </child> | ||
390 | <child> | ||
391 | <widget class="GtkAlignment" id="alignment7"> | ||
392 | <property name="visible">True</property> | ||
393 | <property name="xalign">1</property> | ||
394 | <property name="xscale">0</property> | ||
395 | <child> | ||
396 | <widget class="GtkHButtonBox" id="hbuttonbox1"> | ||
397 | <property name="visible">True</property> | ||
398 | <property name="spacing">5</property> | ||
399 | <child> | ||
400 | <widget class="GtkButton" id="button7"> | ||
401 | <property name="visible">True</property> | ||
402 | <property name="can_focus">True</property> | ||
403 | <property name="receives_default">True</property> | ||
404 | <property name="label" translatable="yes">gtk-remove</property> | ||
405 | <property name="use_stock">True</property> | ||
406 | <property name="response_id">0</property> | ||
407 | </widget> | ||
408 | </child> | ||
409 | <child> | ||
410 | <widget class="GtkButton" id="button6"> | ||
411 | <property name="visible">True</property> | ||
412 | <property name="can_focus">True</property> | ||
413 | <property name="receives_default">True</property> | ||
414 | <property name="label" translatable="yes">gtk-edit</property> | ||
415 | <property name="use_stock">True</property> | ||
416 | <property name="response_id">0</property> | ||
417 | </widget> | ||
418 | <packing> | ||
419 | <property name="position">1</property> | ||
420 | </packing> | ||
421 | </child> | ||
422 | <child> | ||
423 | <widget class="GtkButton" id="button5"> | ||
424 | <property name="visible">True</property> | ||
425 | <property name="can_focus">True</property> | ||
426 | <property name="receives_default">True</property> | ||
427 | <property name="label" translatable="yes">gtk-add</property> | ||
428 | <property name="use_stock">True</property> | ||
429 | <property name="response_id">0</property> | ||
430 | </widget> | ||
431 | <packing> | ||
432 | <property name="position">2</property> | ||
433 | </packing> | ||
434 | </child> | ||
435 | </widget> | ||
436 | </child> | ||
437 | </widget> | ||
438 | <packing> | ||
439 | <property name="left_attach">1</property> | ||
440 | <property name="right_attach">3</property> | ||
441 | <property name="top_attach">3</property> | ||
442 | <property name="bottom_attach">4</property> | ||
443 | <property name="y_options"></property> | ||
444 | </packing> | ||
445 | </child> | ||
446 | <child> | ||
447 | <widget class="GtkAlignment" id="alignment5"> | ||
448 | <property name="visible">True</property> | ||
449 | <child> | ||
450 | <placeholder/> | ||
451 | </child> | ||
452 | </widget> | ||
453 | <packing> | ||
454 | <property name="top_attach">3</property> | ||
455 | <property name="bottom_attach">4</property> | ||
456 | <property name="y_options"></property> | ||
457 | </packing> | ||
458 | </child> | ||
459 | <child> | ||
460 | <widget class="GtkLabel" id="label10"> | ||
461 | <property name="visible">True</property> | ||
462 | <property name="xalign">0</property> | ||
463 | <property name="yalign">0</property> | ||
464 | <property name="xpad">12</property> | ||
465 | <property name="label" translatable="yes">Search:</property> | ||
466 | </widget> | ||
467 | <packing> | ||
468 | <property name="top_attach">5</property> | ||
469 | <property name="bottom_attach">6</property> | ||
470 | <property name="y_options"></property> | ||
471 | </packing> | ||
472 | </child> | ||
473 | <child> | ||
474 | <widget class="GtkEntry" id="entry2"> | ||
475 | <property name="visible">True</property> | ||
476 | <property name="can_focus">True</property> | ||
477 | </widget> | ||
478 | <packing> | ||
479 | <property name="left_attach">1</property> | ||
480 | <property name="right_attach">3</property> | ||
481 | <property name="top_attach">5</property> | ||
482 | <property name="bottom_attach">6</property> | ||
483 | <property name="y_options"></property> | ||
484 | </packing> | ||
485 | </child> | ||
486 | <child> | ||
487 | <widget class="GtkAlignment" id="alignment8"> | ||
488 | <property name="visible">True</property> | ||
489 | <property name="xalign">0</property> | ||
490 | <property name="left_padding">12</property> | ||
491 | <child> | ||
492 | <widget class="GtkScrolledWindow" id="scrolledwindow2"> | ||
493 | <property name="visible">True</property> | ||
494 | <property name="can_focus">True</property> | ||
495 | <property name="hscrollbar_policy">GTK_POLICY_AUTOMATIC</property> | ||
496 | <property name="vscrollbar_policy">GTK_POLICY_AUTOMATIC</property> | ||
497 | <child> | ||
498 | <widget class="GtkTreeView" id="treeview2"> | ||
499 | <property name="visible">True</property> | ||
500 | <property name="can_focus">True</property> | ||
501 | <property name="headers_clickable">True</property> | ||
502 | </widget> | ||
503 | </child> | ||
504 | </widget> | ||
505 | </child> | ||
506 | </widget> | ||
507 | <packing> | ||
508 | <property name="right_attach">3</property> | ||
509 | <property name="top_attach">6</property> | ||
510 | <property name="bottom_attach">7</property> | ||
511 | <property name="y_options"></property> | ||
512 | </packing> | ||
513 | </child> | ||
514 | </widget> | ||
515 | <packing> | ||
516 | <property name="position">1</property> | ||
517 | </packing> | ||
518 | </child> | ||
519 | <child internal-child="action_area"> | ||
520 | <widget class="GtkHButtonBox" id="dialog-action_area2"> | ||
521 | <property name="visible">True</property> | ||
522 | <property name="layout_style">GTK_BUTTONBOX_END</property> | ||
523 | <child> | ||
524 | <widget class="GtkButton" id="button4"> | ||
525 | <property name="visible">True</property> | ||
526 | <property name="can_focus">True</property> | ||
527 | <property name="receives_default">True</property> | ||
528 | <property name="label" translatable="yes">gtk-close</property> | ||
529 | <property name="use_stock">True</property> | ||
530 | <property name="response_id">0</property> | ||
531 | </widget> | ||
532 | </child> | ||
533 | </widget> | ||
534 | <packing> | ||
535 | <property name="expand">False</property> | ||
536 | <property name="pack_type">GTK_PACK_END</property> | ||
537 | </packing> | ||
538 | </child> | ||
539 | </widget> | ||
540 | </child> | ||
541 | </widget> | ||
542 | <widget class="GtkWindow" id="main_window"> | ||
543 | <child> | ||
544 | <widget class="GtkVBox" id="main_window_vbox"> | ||
545 | <property name="visible">True</property> | ||
546 | <child> | ||
547 | <widget class="GtkToolbar" id="main_toolbar"> | ||
548 | <property name="visible">True</property> | ||
549 | <child> | ||
550 | <widget class="GtkToolButton" id="main_toolbutton_build"> | ||
551 | <property name="visible">True</property> | ||
552 | <property name="label" translatable="yes">Build</property> | ||
553 | <property name="stock_id">gtk-execute</property> | ||
554 | </widget> | ||
555 | <packing> | ||
556 | <property name="expand">False</property> | ||
557 | </packing> | ||
558 | </child> | ||
559 | </widget> | ||
560 | <packing> | ||
561 | <property name="expand">False</property> | ||
562 | </packing> | ||
563 | </child> | ||
564 | <child> | ||
565 | <widget class="GtkVPaned" id="vpaned1"> | ||
566 | <property name="visible">True</property> | ||
567 | <property name="can_focus">True</property> | ||
568 | <child> | ||
569 | <widget class="GtkScrolledWindow" id="results_scrolledwindow"> | ||
570 | <property name="visible">True</property> | ||
571 | <property name="can_focus">True</property> | ||
572 | <property name="hscrollbar_policy">GTK_POLICY_AUTOMATIC</property> | ||
573 | <property name="vscrollbar_policy">GTK_POLICY_AUTOMATIC</property> | ||
574 | <child> | ||
575 | <placeholder/> | ||
576 | </child> | ||
577 | </widget> | ||
578 | <packing> | ||
579 | <property name="resize">False</property> | ||
580 | <property name="shrink">True</property> | ||
581 | </packing> | ||
582 | </child> | ||
583 | <child> | ||
584 | <widget class="GtkScrolledWindow" id="progress_scrolledwindow"> | ||
585 | <property name="visible">True</property> | ||
586 | <property name="can_focus">True</property> | ||
587 | <property name="hscrollbar_policy">GTK_POLICY_AUTOMATIC</property> | ||
588 | <property name="vscrollbar_policy">GTK_POLICY_AUTOMATIC</property> | ||
589 | <child> | ||
590 | <placeholder/> | ||
591 | </child> | ||
592 | </widget> | ||
593 | <packing> | ||
594 | <property name="resize">True</property> | ||
595 | <property name="shrink">True</property> | ||
596 | </packing> | ||
597 | </child> | ||
598 | </widget> | ||
599 | <packing> | ||
600 | <property name="position">1</property> | ||
601 | </packing> | ||
602 | </child> | ||
603 | </widget> | ||
604 | </child> | ||
605 | </widget> | ||
606 | </glade-interface> | ||
diff --git a/bitbake-dev/lib/bb/ui/crumbs/runningbuild.py b/bitbake-dev/lib/bb/ui/crumbs/runningbuild.py deleted file mode 100644 index 401559255b..0000000000 --- a/bitbake-dev/lib/bb/ui/crumbs/runningbuild.py +++ /dev/null | |||
@@ -1,180 +0,0 @@ | |||
1 | # | ||
2 | # BitBake Graphical GTK User Interface | ||
3 | # | ||
4 | # Copyright (C) 2008 Intel Corporation | ||
5 | # | ||
6 | # Authored by Rob Bradford <rob@linux.intel.com> | ||
7 | # | ||
8 | # This program is free software; you can redistribute it and/or modify | ||
9 | # it under the terms of the GNU General Public License version 2 as | ||
10 | # published by the Free Software Foundation. | ||
11 | # | ||
12 | # This program is distributed in the hope that it will be useful, | ||
13 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | # GNU General Public License for more details. | ||
16 | # | ||
17 | # You should have received a copy of the GNU General Public License along | ||
18 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
19 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
20 | |||
21 | import gtk | ||
22 | import gobject | ||
23 | |||
24 | class RunningBuildModel (gtk.TreeStore): | ||
25 | (COL_TYPE, COL_PACKAGE, COL_TASK, COL_MESSAGE, COL_ICON, COL_ACTIVE) = (0, 1, 2, 3, 4, 5) | ||
26 | def __init__ (self): | ||
27 | gtk.TreeStore.__init__ (self, | ||
28 | gobject.TYPE_STRING, | ||
29 | gobject.TYPE_STRING, | ||
30 | gobject.TYPE_STRING, | ||
31 | gobject.TYPE_STRING, | ||
32 | gobject.TYPE_STRING, | ||
33 | gobject.TYPE_BOOLEAN) | ||
34 | |||
35 | class RunningBuild (gobject.GObject): | ||
36 | __gsignals__ = { | ||
37 | 'build-succeeded' : (gobject.SIGNAL_RUN_LAST, | ||
38 | gobject.TYPE_NONE, | ||
39 | ()), | ||
40 | 'build-failed' : (gobject.SIGNAL_RUN_LAST, | ||
41 | gobject.TYPE_NONE, | ||
42 | ()) | ||
43 | } | ||
44 | pids_to_task = {} | ||
45 | tasks_to_iter = {} | ||
46 | |||
47 | def __init__ (self): | ||
48 | gobject.GObject.__init__ (self) | ||
49 | self.model = RunningBuildModel() | ||
50 | |||
51 | def handle_event (self, event): | ||
52 | # Handle an event from the event queue, this may result in updating | ||
53 | # the model and thus the UI. Or it may be to tell us that the build | ||
54 | # has finished successfully (or not, as the case may be.) | ||
55 | |||
56 | parent = None | ||
57 | pid = 0 | ||
58 | package = None | ||
59 | task = None | ||
60 | |||
61 | # If we have a pid attached to this message/event try and get the | ||
62 | # (package, task) pair for it. If we get that then get the parent iter | ||
63 | # for the message. | ||
64 | if hassattr(event, 'pid'): | ||
65 | pid = event.pid | ||
66 | if self.pids_to_task.has_key(pid): | ||
67 | (package, task) = self.pids_to_task[pid] | ||
68 | parent = self.tasks_to_iter[(package, task)] | ||
69 | |||
70 | if isinstance(event, bb.msg.Msg): | ||
71 | # Set a pretty icon for the message based on it's type. | ||
72 | if isinstance(event, bb.msg.MsgWarn): | ||
73 | icon = "dialog-warning" | ||
74 | elif isinstance(event, bb.msg.MsgErr): | ||
75 | icon = "dialog-error" | ||
76 | else: | ||
77 | icon = None | ||
78 | |||
79 | # Ignore the "Running task i of n .." messages | ||
80 | if (event._message.startswith ("Running task")): | ||
81 | return | ||
82 | |||
83 | # Add the message to the tree either at the top level if parent is | ||
84 | # None otherwise as a descendent of a task. | ||
85 | self.model.append (parent, | ||
86 | (event.__name__.split()[-1], # e.g. MsgWarn, MsgError | ||
87 | package, | ||
88 | task, | ||
89 | event._message, | ||
90 | icon, | ||
91 | False)) | ||
92 | elif isinstance(event, bb.build.TaskStarted): | ||
93 | (package, task) = (event._package, event._task) | ||
94 | |||
95 | # Save out this PID. | ||
96 | self.pids_to_task[pid] = (package,task) | ||
97 | |||
98 | # Check if we already have this package in our model. If so then | ||
99 | # that can be the parent for the task. Otherwise we create a new | ||
100 | # top level for the package. | ||
101 | if (self.tasks_to_iter.has_key ((package, None))): | ||
102 | parent = self.tasks_to_iter[(package, None)] | ||
103 | else: | ||
104 | parent = self.model.append (None, (None, | ||
105 | package, | ||
106 | None, | ||
107 | "Package: %s" % (package), | ||
108 | None, | ||
109 | False)) | ||
110 | self.tasks_to_iter[(package, None)] = parent | ||
111 | |||
112 | # Because this parent package now has an active child mark it as | ||
113 | # such. | ||
114 | self.model.set(parent, self.model.COL_ICON, "gtk-execute") | ||
115 | |||
116 | # Add an entry in the model for this task | ||
117 | i = self.model.append (parent, (None, | ||
118 | package, | ||
119 | task, | ||
120 | "Task: %s" % (task), | ||
121 | None, | ||
122 | False)) | ||
123 | |||
124 | # Save out the iter so that we can find it when we have a message | ||
125 | # that we need to attach to a task. | ||
126 | self.tasks_to_iter[(package, task)] = i | ||
127 | |||
128 | # Mark this task as active. | ||
129 | self.model.set(i, self.model.COL_ICON, "gtk-execute") | ||
130 | |||
131 | elif isinstance(event, bb.build.Task): | ||
132 | |||
133 | if isinstance(event, bb.build.TaskFailed): | ||
134 | # Mark the task as failed | ||
135 | i = self.tasks_to_iter[(package, task)] | ||
136 | self.model.set(i, self.model.COL_ICON, "dialog-error") | ||
137 | |||
138 | # Mark the parent package as failed | ||
139 | i = self.tasks_to_iter[(package, None)] | ||
140 | self.model.set(i, self.model.COL_ICON, "dialog-error") | ||
141 | else: | ||
142 | # Mark the task as inactive | ||
143 | i = self.tasks_to_iter[(package, task)] | ||
144 | self.model.set(i, self.model.COL_ICON, None) | ||
145 | |||
146 | # Mark the parent package as inactive | ||
147 | i = self.tasks_to_iter[(package, None)] | ||
148 | self.model.set(i, self.model.COL_ICON, None) | ||
149 | |||
150 | |||
151 | # Clear the iters and the pids since when the task goes away the | ||
152 | # pid will no longer be used for messages | ||
153 | del self.tasks_to_iter[(package, task)] | ||
154 | del self.pids_to_task[pid] | ||
155 | |||
156 | elif isinstance(event, bb.event.BuildCompleted): | ||
157 | failures = int (event._failures) | ||
158 | |||
159 | # Emit the appropriate signal depending on the number of failures | ||
160 | if (failures > 1): | ||
161 | self.emit ("build-failed") | ||
162 | else: | ||
163 | self.emit ("build-succeeded") | ||
164 | |||
165 | class RunningBuildTreeView (gtk.TreeView): | ||
166 | def __init__ (self): | ||
167 | gtk.TreeView.__init__ (self) | ||
168 | |||
169 | # The icon that indicates whether we're building or failed. | ||
170 | renderer = gtk.CellRendererPixbuf () | ||
171 | col = gtk.TreeViewColumn ("Status", renderer) | ||
172 | col.add_attribute (renderer, "icon-name", 4) | ||
173 | self.append_column (col) | ||
174 | |||
175 | # The message of the build. | ||
176 | renderer = gtk.CellRendererText () | ||
177 | col = gtk.TreeViewColumn ("Message", renderer, text=3) | ||
178 | self.append_column (col) | ||
179 | |||
180 | |||
diff --git a/bitbake-dev/lib/bb/ui/depexp.py b/bitbake-dev/lib/bb/ui/depexp.py deleted file mode 100644 index cfa5b6564e..0000000000 --- a/bitbake-dev/lib/bb/ui/depexp.py +++ /dev/null | |||
@@ -1,272 +0,0 @@ | |||
1 | # | ||
2 | # BitBake Graphical GTK based Dependency Explorer | ||
3 | # | ||
4 | # Copyright (C) 2007 Ross Burton | ||
5 | # Copyright (C) 2007 - 2008 Richard Purdie | ||
6 | # | ||
7 | # This program is free software; you can redistribute it and/or modify | ||
8 | # it under the terms of the GNU General Public License version 2 as | ||
9 | # published by the Free Software Foundation. | ||
10 | # | ||
11 | # This program is distributed in the hope that it will be useful, | ||
12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | # GNU General Public License for more details. | ||
15 | # | ||
16 | # You should have received a copy of the GNU General Public License along | ||
17 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
18 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
19 | |||
20 | import gobject | ||
21 | import gtk | ||
22 | import threading | ||
23 | import xmlrpclib | ||
24 | |||
25 | # Package Model | ||
26 | (COL_PKG_NAME) = (0) | ||
27 | |||
28 | # Dependency Model | ||
29 | (TYPE_DEP, TYPE_RDEP) = (0, 1) | ||
30 | (COL_DEP_TYPE, COL_DEP_PARENT, COL_DEP_PACKAGE) = (0, 1, 2) | ||
31 | |||
32 | class PackageDepView(gtk.TreeView): | ||
33 | def __init__(self, model, dep_type, label): | ||
34 | gtk.TreeView.__init__(self) | ||
35 | self.current = None | ||
36 | self.dep_type = dep_type | ||
37 | self.filter_model = model.filter_new() | ||
38 | self.filter_model.set_visible_func(self._filter) | ||
39 | self.set_model(self.filter_model) | ||
40 | #self.connect("row-activated", self.on_package_activated, COL_DEP_PACKAGE) | ||
41 | self.append_column(gtk.TreeViewColumn(label, gtk.CellRendererText(), text=COL_DEP_PACKAGE)) | ||
42 | |||
43 | def _filter(self, model, iter): | ||
44 | (this_type, package) = model.get(iter, COL_DEP_TYPE, COL_DEP_PARENT) | ||
45 | if this_type != self.dep_type: return False | ||
46 | return package == self.current | ||
47 | |||
48 | def set_current_package(self, package): | ||
49 | self.current = package | ||
50 | self.filter_model.refilter() | ||
51 | |||
52 | class PackageReverseDepView(gtk.TreeView): | ||
53 | def __init__(self, model, label): | ||
54 | gtk.TreeView.__init__(self) | ||
55 | self.current = None | ||
56 | self.filter_model = model.filter_new() | ||
57 | self.filter_model.set_visible_func(self._filter) | ||
58 | self.set_model(self.filter_model) | ||
59 | self.append_column(gtk.TreeViewColumn(label, gtk.CellRendererText(), text=COL_DEP_PARENT)) | ||
60 | |||
61 | def _filter(self, model, iter): | ||
62 | package = model.get_value(iter, COL_DEP_PACKAGE) | ||
63 | return package == self.current | ||
64 | |||
65 | def set_current_package(self, package): | ||
66 | self.current = package | ||
67 | self.filter_model.refilter() | ||
68 | |||
69 | class DepExplorer(gtk.Window): | ||
70 | def __init__(self): | ||
71 | gtk.Window.__init__(self) | ||
72 | self.set_title("Dependency Explorer") | ||
73 | self.set_default_size(500, 500) | ||
74 | self.connect("delete-event", gtk.main_quit) | ||
75 | |||
76 | # Create the data models | ||
77 | self.pkg_model = gtk.ListStore(gobject.TYPE_STRING) | ||
78 | self.depends_model = gtk.ListStore(gobject.TYPE_INT, gobject.TYPE_STRING, gobject.TYPE_STRING) | ||
79 | |||
80 | pane = gtk.HPaned() | ||
81 | pane.set_position(250) | ||
82 | self.add(pane) | ||
83 | |||
84 | # The master list of packages | ||
85 | scrolled = gtk.ScrolledWindow() | ||
86 | scrolled.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC) | ||
87 | scrolled.set_shadow_type(gtk.SHADOW_IN) | ||
88 | self.pkg_treeview = gtk.TreeView(self.pkg_model) | ||
89 | self.pkg_treeview.get_selection().connect("changed", self.on_cursor_changed) | ||
90 | self.pkg_treeview.append_column(gtk.TreeViewColumn("Package", gtk.CellRendererText(), text=COL_PKG_NAME)) | ||
91 | pane.add1(scrolled) | ||
92 | scrolled.add(self.pkg_treeview) | ||
93 | |||
94 | box = gtk.VBox(homogeneous=True, spacing=4) | ||
95 | |||
96 | # Runtime Depends | ||
97 | scrolled = gtk.ScrolledWindow() | ||
98 | scrolled.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC) | ||
99 | scrolled.set_shadow_type(gtk.SHADOW_IN) | ||
100 | self.rdep_treeview = PackageDepView(self.depends_model, TYPE_RDEP, "Runtime Depends") | ||
101 | self.rdep_treeview.connect("row-activated", self.on_package_activated, COL_DEP_PACKAGE) | ||
102 | scrolled.add(self.rdep_treeview) | ||
103 | box.add(scrolled) | ||
104 | |||
105 | # Build Depends | ||
106 | scrolled = gtk.ScrolledWindow() | ||
107 | scrolled.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC) | ||
108 | scrolled.set_shadow_type(gtk.SHADOW_IN) | ||
109 | self.dep_treeview = PackageDepView(self.depends_model, TYPE_DEP, "Build Depends") | ||
110 | self.dep_treeview.connect("row-activated", self.on_package_activated, COL_DEP_PACKAGE) | ||
111 | scrolled.add(self.dep_treeview) | ||
112 | box.add(scrolled) | ||
113 | pane.add2(box) | ||
114 | |||
115 | # Reverse Depends | ||
116 | scrolled = gtk.ScrolledWindow() | ||
117 | scrolled.set_policy(gtk.POLICY_AUTOMATIC, gtk.POLICY_AUTOMATIC) | ||
118 | scrolled.set_shadow_type(gtk.SHADOW_IN) | ||
119 | self.revdep_treeview = PackageReverseDepView(self.depends_model, "Reverse Depends") | ||
120 | self.revdep_treeview.connect("row-activated", self.on_package_activated, COL_DEP_PARENT) | ||
121 | scrolled.add(self.revdep_treeview) | ||
122 | box.add(scrolled) | ||
123 | pane.add2(box) | ||
124 | |||
125 | self.show_all() | ||
126 | |||
127 | def on_package_activated(self, treeview, path, column, data_col): | ||
128 | model = treeview.get_model() | ||
129 | package = model.get_value(model.get_iter(path), data_col) | ||
130 | |||
131 | pkg_path = [] | ||
132 | def finder(model, path, iter, needle): | ||
133 | package = model.get_value(iter, COL_PKG_NAME) | ||
134 | if package == needle: | ||
135 | pkg_path.append(path) | ||
136 | return True | ||
137 | else: | ||
138 | return False | ||
139 | self.pkg_model.foreach(finder, package) | ||
140 | if pkg_path: | ||
141 | self.pkg_treeview.get_selection().select_path(pkg_path[0]) | ||
142 | self.pkg_treeview.scroll_to_cell(pkg_path[0]) | ||
143 | |||
144 | def on_cursor_changed(self, selection): | ||
145 | (model, it) = selection.get_selected() | ||
146 | if iter is None: | ||
147 | current_package = None | ||
148 | else: | ||
149 | current_package = model.get_value(it, COL_PKG_NAME) | ||
150 | self.rdep_treeview.set_current_package(current_package) | ||
151 | self.dep_treeview.set_current_package(current_package) | ||
152 | self.revdep_treeview.set_current_package(current_package) | ||
153 | |||
154 | |||
155 | def parse(depgraph, pkg_model, depends_model): | ||
156 | |||
157 | for package in depgraph["pn"]: | ||
158 | pkg_model.set(pkg_model.append(), COL_PKG_NAME, package) | ||
159 | |||
160 | for package in depgraph["depends"]: | ||
161 | for depend in depgraph["depends"][package]: | ||
162 | depends_model.set (depends_model.append(), | ||
163 | COL_DEP_TYPE, TYPE_DEP, | ||
164 | COL_DEP_PARENT, package, | ||
165 | COL_DEP_PACKAGE, depend) | ||
166 | |||
167 | for package in depgraph["rdepends-pn"]: | ||
168 | for rdepend in depgraph["rdepends-pn"][package]: | ||
169 | depends_model.set (depends_model.append(), | ||
170 | COL_DEP_TYPE, TYPE_RDEP, | ||
171 | COL_DEP_PARENT, package, | ||
172 | COL_DEP_PACKAGE, rdepend) | ||
173 | |||
174 | class ProgressBar(gtk.Window): | ||
175 | def __init__(self): | ||
176 | |||
177 | gtk.Window.__init__(self) | ||
178 | self.set_title("Parsing .bb files, please wait...") | ||
179 | self.set_default_size(500, 0) | ||
180 | self.connect("delete-event", gtk.main_quit) | ||
181 | |||
182 | self.progress = gtk.ProgressBar() | ||
183 | self.add(self.progress) | ||
184 | self.show_all() | ||
185 | |||
186 | class gtkthread(threading.Thread): | ||
187 | quit = threading.Event() | ||
188 | def __init__(self, shutdown): | ||
189 | threading.Thread.__init__(self) | ||
190 | self.setDaemon(True) | ||
191 | self.shutdown = shutdown | ||
192 | |||
193 | def run(self): | ||
194 | gobject.threads_init() | ||
195 | gtk.gdk.threads_init() | ||
196 | gtk.main() | ||
197 | gtkthread.quit.set() | ||
198 | |||
199 | def init(server, eventHandler): | ||
200 | |||
201 | try: | ||
202 | cmdline = server.runCommand(["getCmdLineAction"]) | ||
203 | if not cmdline or cmdline[0] != "generateDotGraph": | ||
204 | print "This UI is only compatible with the -g option" | ||
205 | return | ||
206 | ret = server.runCommand(["generateDepTreeEvent", cmdline[1], cmdline[2]]) | ||
207 | if ret != True: | ||
208 | print "Couldn't run command! %s" % ret | ||
209 | return | ||
210 | except xmlrpclib.Fault, x: | ||
211 | print "XMLRPC Fault getting commandline:\n %s" % x | ||
212 | return | ||
213 | |||
214 | shutdown = 0 | ||
215 | |||
216 | gtkgui = gtkthread(shutdown) | ||
217 | gtkgui.start() | ||
218 | |||
219 | gtk.gdk.threads_enter() | ||
220 | pbar = ProgressBar() | ||
221 | dep = DepExplorer() | ||
222 | gtk.gdk.threads_leave() | ||
223 | |||
224 | while True: | ||
225 | try: | ||
226 | event = eventHandler.waitEvent(0.25) | ||
227 | if gtkthread.quit.isSet(): | ||
228 | break | ||
229 | |||
230 | if event is None: | ||
231 | continue | ||
232 | if isinstance(event, bb.event.ParseProgress): | ||
233 | x = event.sofar | ||
234 | y = event.total | ||
235 | if x == y: | ||
236 | print("\nParsing finished. %d cached, %d parsed, %d skipped, %d masked, %d errors." | ||
237 | % ( event.cached, event.parsed, event.skipped, event.masked, event.errors)) | ||
238 | pbar.hide() | ||
239 | gtk.gdk.threads_enter() | ||
240 | pbar.progress.set_fraction(float(x)/float(y)) | ||
241 | pbar.progress.set_text("%d/%d (%2d %%)" % (x, y, x*100/y)) | ||
242 | gtk.gdk.threads_leave() | ||
243 | continue | ||
244 | |||
245 | if isinstance(event, bb.event.DepTreeGenerated): | ||
246 | gtk.gdk.threads_enter() | ||
247 | parse(event._depgraph, dep.pkg_model, dep.depends_model) | ||
248 | gtk.gdk.threads_leave() | ||
249 | |||
250 | if isinstance(event, bb.command.CookerCommandCompleted): | ||
251 | continue | ||
252 | if isinstance(event, bb.command.CookerCommandFailed): | ||
253 | print "Command execution failed: %s" % event.error | ||
254 | break | ||
255 | if isinstance(event, bb.cooker.CookerExit): | ||
256 | break | ||
257 | |||
258 | continue | ||
259 | |||
260 | except KeyboardInterrupt: | ||
261 | if shutdown == 2: | ||
262 | print "\nThird Keyboard Interrupt, exit.\n" | ||
263 | break | ||
264 | if shutdown == 1: | ||
265 | print "\nSecond Keyboard Interrupt, stopping...\n" | ||
266 | server.runCommand(["stateStop"]) | ||
267 | if shutdown == 0: | ||
268 | print "\nKeyboard Interrupt, closing down...\n" | ||
269 | server.runCommand(["stateShutdown"]) | ||
270 | shutdown = shutdown + 1 | ||
271 | pass | ||
272 | |||
diff --git a/bitbake-dev/lib/bb/ui/goggle.py b/bitbake-dev/lib/bb/ui/goggle.py deleted file mode 100644 index 94995d82db..0000000000 --- a/bitbake-dev/lib/bb/ui/goggle.py +++ /dev/null | |||
@@ -1,77 +0,0 @@ | |||
1 | # | ||
2 | # BitBake Graphical GTK User Interface | ||
3 | # | ||
4 | # Copyright (C) 2008 Intel Corporation | ||
5 | # | ||
6 | # Authored by Rob Bradford <rob@linux.intel.com> | ||
7 | # | ||
8 | # This program is free software; you can redistribute it and/or modify | ||
9 | # it under the terms of the GNU General Public License version 2 as | ||
10 | # published by the Free Software Foundation. | ||
11 | # | ||
12 | # This program is distributed in the hope that it will be useful, | ||
13 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | # GNU General Public License for more details. | ||
16 | # | ||
17 | # You should have received a copy of the GNU General Public License along | ||
18 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
19 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
20 | |||
21 | import gobject | ||
22 | import gtk | ||
23 | import xmlrpclib | ||
24 | from bb.ui.crumbs.runningbuild import RunningBuildTreeView, RunningBuild | ||
25 | |||
26 | def event_handle_idle_func (eventHandler, build): | ||
27 | |||
28 | # Consume as many messages as we can in the time available to us | ||
29 | event = eventHandler.getEvent() | ||
30 | while event: | ||
31 | build.handle_event (event) | ||
32 | event = eventHandler.getEvent() | ||
33 | |||
34 | return True | ||
35 | |||
36 | class MainWindow (gtk.Window): | ||
37 | def __init__ (self): | ||
38 | gtk.Window.__init__ (self, gtk.WINDOW_TOPLEVEL) | ||
39 | |||
40 | # Setup tree view and the scrolled window | ||
41 | scrolled_window = gtk.ScrolledWindow () | ||
42 | self.add (scrolled_window) | ||
43 | self.cur_build_tv = RunningBuildTreeView() | ||
44 | scrolled_window.add (self.cur_build_tv) | ||
45 | |||
46 | def init (server, eventHandler): | ||
47 | gobject.threads_init() | ||
48 | gtk.gdk.threads_init() | ||
49 | |||
50 | window = MainWindow () | ||
51 | window.show_all () | ||
52 | |||
53 | # Create the object for the current build | ||
54 | running_build = RunningBuild () | ||
55 | window.cur_build_tv.set_model (running_build.model) | ||
56 | try: | ||
57 | cmdline = server.runCommand(["getCmdLineAction"]) | ||
58 | print cmdline | ||
59 | if not cmdline: | ||
60 | return 1 | ||
61 | ret = server.runCommand(cmdline) | ||
62 | if ret != True: | ||
63 | print "Couldn't get default commandline! %s" % ret | ||
64 | return 1 | ||
65 | except xmlrpclib.Fault, x: | ||
66 | print "XMLRPC Fault getting commandline:\n %s" % x | ||
67 | return 1 | ||
68 | |||
69 | # Use a timeout function for probing the event queue to find out if we | ||
70 | # have a message waiting for us. | ||
71 | gobject.timeout_add (200, | ||
72 | event_handle_idle_func, | ||
73 | eventHandler, | ||
74 | running_build) | ||
75 | |||
76 | gtk.main() | ||
77 | |||
diff --git a/bitbake-dev/lib/bb/ui/knotty.py b/bitbake-dev/lib/bb/ui/knotty.py deleted file mode 100644 index c69fd6ca64..0000000000 --- a/bitbake-dev/lib/bb/ui/knotty.py +++ /dev/null | |||
@@ -1,162 +0,0 @@ | |||
1 | # | ||
2 | # BitBake (No)TTY UI Implementation | ||
3 | # | ||
4 | # Handling output to TTYs or files (no TTY) | ||
5 | # | ||
6 | # Copyright (C) 2006-2007 Richard Purdie | ||
7 | # | ||
8 | # This program is free software; you can redistribute it and/or modify | ||
9 | # it under the terms of the GNU General Public License version 2 as | ||
10 | # published by the Free Software Foundation. | ||
11 | # | ||
12 | # This program is distributed in the hope that it will be useful, | ||
13 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | # GNU General Public License for more details. | ||
16 | # | ||
17 | # You should have received a copy of the GNU General Public License along | ||
18 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
19 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
20 | |||
21 | import os | ||
22 | |||
23 | import sys | ||
24 | import itertools | ||
25 | import xmlrpclib | ||
26 | |||
27 | parsespin = itertools.cycle( r'|/-\\' ) | ||
28 | |||
29 | def init(server, eventHandler): | ||
30 | |||
31 | # Get values of variables which control our output | ||
32 | includelogs = server.runCommand(["getVariable", "BBINCLUDELOGS"]) | ||
33 | loglines = server.runCommand(["getVariable", "BBINCLUDELOGS_LINES"]) | ||
34 | |||
35 | try: | ||
36 | cmdline = server.runCommand(["getCmdLineAction"]) | ||
37 | #print cmdline | ||
38 | if not cmdline: | ||
39 | return 1 | ||
40 | ret = server.runCommand(cmdline) | ||
41 | if ret != True: | ||
42 | print "Couldn't get default commandline! %s" % ret | ||
43 | return 1 | ||
44 | except xmlrpclib.Fault, x: | ||
45 | print "XMLRPC Fault getting commandline:\n %s" % x | ||
46 | return 1 | ||
47 | |||
48 | shutdown = 0 | ||
49 | return_value = 0 | ||
50 | while True: | ||
51 | try: | ||
52 | event = eventHandler.waitEvent(0.25) | ||
53 | if event is None: | ||
54 | continue | ||
55 | #print event | ||
56 | if isinstance(event, bb.msg.MsgPlain): | ||
57 | print event._message | ||
58 | continue | ||
59 | if isinstance(event, bb.msg.MsgDebug): | ||
60 | print 'DEBUG: ' + event._message | ||
61 | continue | ||
62 | if isinstance(event, bb.msg.MsgNote): | ||
63 | print 'NOTE: ' + event._message | ||
64 | continue | ||
65 | if isinstance(event, bb.msg.MsgWarn): | ||
66 | print 'WARNING: ' + event._message | ||
67 | continue | ||
68 | if isinstance(event, bb.msg.MsgError): | ||
69 | return_value = 1 | ||
70 | print 'ERROR: ' + event._message | ||
71 | continue | ||
72 | if isinstance(event, bb.msg.MsgFatal): | ||
73 | return_value = 1 | ||
74 | print 'FATAL: ' + event._message | ||
75 | break | ||
76 | if isinstance(event, bb.build.TaskFailed): | ||
77 | return_value = 1 | ||
78 | logfile = event.logfile | ||
79 | if logfile: | ||
80 | print "ERROR: Logfile of failure stored in %s." % logfile | ||
81 | if 1 or includelogs: | ||
82 | print "Log data follows:" | ||
83 | f = open(logfile, "r") | ||
84 | lines = [] | ||
85 | while True: | ||
86 | l = f.readline() | ||
87 | if l == '': | ||
88 | break | ||
89 | l = l.rstrip() | ||
90 | if loglines: | ||
91 | lines.append(' | %s' % l) | ||
92 | if len(lines) > int(loglines): | ||
93 | lines.pop(0) | ||
94 | else: | ||
95 | print '| %s' % l | ||
96 | f.close() | ||
97 | if lines: | ||
98 | for line in lines: | ||
99 | print line | ||
100 | if isinstance(event, bb.build.TaskBase): | ||
101 | print "NOTE: %s" % event._message | ||
102 | continue | ||
103 | if isinstance(event, bb.event.ParseProgress): | ||
104 | x = event.sofar | ||
105 | y = event.total | ||
106 | if os.isatty(sys.stdout.fileno()): | ||
107 | sys.stdout.write("\rNOTE: Handling BitBake files: %s (%04d/%04d) [%2d %%]" % ( parsespin.next(), x, y, x*100/y ) ) | ||
108 | sys.stdout.flush() | ||
109 | else: | ||
110 | if x == 1: | ||
111 | sys.stdout.write("Parsing .bb files, please wait...") | ||
112 | sys.stdout.flush() | ||
113 | if x == y: | ||
114 | sys.stdout.write("done.") | ||
115 | sys.stdout.flush() | ||
116 | if x == y: | ||
117 | print("\nParsing of %d .bb files complete (%d cached, %d parsed). %d targets, %d skipped, %d masked, %d errors." | ||
118 | % ( event.total, event.cached, event.parsed, event.virtuals, event.skipped, event.masked, event.errors)) | ||
119 | continue | ||
120 | |||
121 | if isinstance(event, bb.command.CookerCommandCompleted): | ||
122 | break | ||
123 | if isinstance(event, bb.command.CookerCommandSetExitCode): | ||
124 | return_value = event.exitcode | ||
125 | continue | ||
126 | if isinstance(event, bb.command.CookerCommandFailed): | ||
127 | return_value = 1 | ||
128 | print "Command execution failed: %s" % event.error | ||
129 | break | ||
130 | if isinstance(event, bb.cooker.CookerExit): | ||
131 | break | ||
132 | |||
133 | # ignore | ||
134 | if isinstance(event, bb.event.BuildStarted): | ||
135 | continue | ||
136 | if isinstance(event, bb.event.BuildCompleted): | ||
137 | continue | ||
138 | if isinstance(event, bb.event.MultipleProviders): | ||
139 | continue | ||
140 | if isinstance(event, bb.runqueue.runQueueEvent): | ||
141 | continue | ||
142 | if isinstance(event, bb.event.StampUpdate): | ||
143 | continue | ||
144 | if isinstance(event, bb.event.ConfigParsed): | ||
145 | continue | ||
146 | if isinstance(event, bb.event.RecipeParsed): | ||
147 | continue | ||
148 | print "Unknown Event: %s" % event | ||
149 | |||
150 | except KeyboardInterrupt: | ||
151 | if shutdown == 2: | ||
152 | print "\nThird Keyboard Interrupt, exit.\n" | ||
153 | break | ||
154 | if shutdown == 1: | ||
155 | print "\nSecond Keyboard Interrupt, stopping...\n" | ||
156 | server.runCommand(["stateStop"]) | ||
157 | if shutdown == 0: | ||
158 | print "\nKeyboard Interrupt, closing down...\n" | ||
159 | server.runCommand(["stateShutdown"]) | ||
160 | shutdown = shutdown + 1 | ||
161 | pass | ||
162 | return return_value | ||
diff --git a/bitbake-dev/lib/bb/ui/ncurses.py b/bitbake-dev/lib/bb/ui/ncurses.py deleted file mode 100644 index 14310dc124..0000000000 --- a/bitbake-dev/lib/bb/ui/ncurses.py +++ /dev/null | |||
@@ -1,335 +0,0 @@ | |||
1 | # | ||
2 | # BitBake Curses UI Implementation | ||
3 | # | ||
4 | # Implements an ncurses frontend for the BitBake utility. | ||
5 | # | ||
6 | # Copyright (C) 2006 Michael 'Mickey' Lauer | ||
7 | # Copyright (C) 2006-2007 Richard Purdie | ||
8 | # | ||
9 | # This program is free software; you can redistribute it and/or modify | ||
10 | # it under the terms of the GNU General Public License version 2 as | ||
11 | # published by the Free Software Foundation. | ||
12 | # | ||
13 | # This program is distributed in the hope that it will be useful, | ||
14 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | # GNU General Public License for more details. | ||
17 | # | ||
18 | # You should have received a copy of the GNU General Public License along | ||
19 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
20 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
21 | |||
22 | """ | ||
23 | We have the following windows: | ||
24 | |||
25 | 1.) Main Window: Shows what we are ultimately building and how far we are. Includes status bar | ||
26 | 2.) Thread Activity Window: Shows one status line for every concurrent bitbake thread. | ||
27 | 3.) Command Line Window: Contains an interactive command line where you can interact w/ Bitbake. | ||
28 | |||
29 | Basic window layout is like that: | ||
30 | |||
31 | |---------------------------------------------------------| | ||
32 | | <Main Window> | <Thread Activity Window> | | ||
33 | | | 0: foo do_compile complete| | ||
34 | | Building Gtk+-2.6.10 | 1: bar do_patch complete | | ||
35 | | Status: 60% | ... | | ||
36 | | | ... | | ||
37 | | | ... | | ||
38 | |---------------------------------------------------------| | ||
39 | |<Command Line Window> | | ||
40 | |>>> which virtual/kernel | | ||
41 | |openzaurus-kernel | | ||
42 | |>>> _ | | ||
43 | |---------------------------------------------------------| | ||
44 | |||
45 | """ | ||
46 | |||
47 | import os, sys, curses, itertools, time | ||
48 | import bb | ||
49 | import xmlrpclib | ||
50 | from bb import ui | ||
51 | from bb.ui import uihelper | ||
52 | |||
53 | parsespin = itertools.cycle( r'|/-\\' ) | ||
54 | |||
55 | X = 0 | ||
56 | Y = 1 | ||
57 | WIDTH = 2 | ||
58 | HEIGHT = 3 | ||
59 | |||
60 | MAXSTATUSLENGTH = 32 | ||
61 | |||
62 | class NCursesUI: | ||
63 | """ | ||
64 | NCurses UI Class | ||
65 | """ | ||
66 | class Window: | ||
67 | """Base Window Class""" | ||
68 | def __init__( self, x, y, width, height, fg=curses.COLOR_BLACK, bg=curses.COLOR_WHITE ): | ||
69 | self.win = curses.newwin( height, width, y, x ) | ||
70 | self.dimensions = ( x, y, width, height ) | ||
71 | """ | ||
72 | if curses.has_colors(): | ||
73 | color = 1 | ||
74 | curses.init_pair( color, fg, bg ) | ||
75 | self.win.bkgdset( ord(' '), curses.color_pair(color) ) | ||
76 | else: | ||
77 | self.win.bkgdset( ord(' '), curses.A_BOLD ) | ||
78 | """ | ||
79 | self.erase() | ||
80 | self.setScrolling() | ||
81 | self.win.noutrefresh() | ||
82 | |||
83 | def erase( self ): | ||
84 | self.win.erase() | ||
85 | |||
86 | def setScrolling( self, b = True ): | ||
87 | self.win.scrollok( b ) | ||
88 | self.win.idlok( b ) | ||
89 | |||
90 | def setBoxed( self ): | ||
91 | self.boxed = True | ||
92 | self.win.box() | ||
93 | self.win.noutrefresh() | ||
94 | |||
95 | def setText( self, x, y, text, *args ): | ||
96 | self.win.addstr( y, x, text, *args ) | ||
97 | self.win.noutrefresh() | ||
98 | |||
99 | def appendText( self, text, *args ): | ||
100 | self.win.addstr( text, *args ) | ||
101 | self.win.noutrefresh() | ||
102 | |||
103 | def drawHline( self, y ): | ||
104 | self.win.hline( y, 0, curses.ACS_HLINE, self.dimensions[WIDTH] ) | ||
105 | self.win.noutrefresh() | ||
106 | |||
107 | class DecoratedWindow( Window ): | ||
108 | """Base class for windows with a box and a title bar""" | ||
109 | def __init__( self, title, x, y, width, height, fg=curses.COLOR_BLACK, bg=curses.COLOR_WHITE ): | ||
110 | NCursesUI.Window.__init__( self, x+1, y+3, width-2, height-4, fg, bg ) | ||
111 | self.decoration = NCursesUI.Window( x, y, width, height, fg, bg ) | ||
112 | self.decoration.setBoxed() | ||
113 | self.decoration.win.hline( 2, 1, curses.ACS_HLINE, width-2 ) | ||
114 | self.setTitle( title ) | ||
115 | |||
116 | def setTitle( self, title ): | ||
117 | self.decoration.setText( 1, 1, title.center( self.dimensions[WIDTH]-2 ), curses.A_BOLD ) | ||
118 | |||
119 | #-------------------------------------------------------------------------# | ||
120 | # class TitleWindow( Window ): | ||
121 | #-------------------------------------------------------------------------# | ||
122 | # """Title Window""" | ||
123 | # def __init__( self, x, y, width, height ): | ||
124 | # NCursesUI.Window.__init__( self, x, y, width, height ) | ||
125 | # version = bb.__version__ | ||
126 | # title = "BitBake %s" % version | ||
127 | # credit = "(C) 2003-2007 Team BitBake" | ||
128 | # #self.win.hline( 2, 1, curses.ACS_HLINE, width-2 ) | ||
129 | # self.win.border() | ||
130 | # self.setText( 1, 1, title.center( self.dimensions[WIDTH]-2 ), curses.A_BOLD ) | ||
131 | # self.setText( 1, 2, credit.center( self.dimensions[WIDTH]-2 ), curses.A_BOLD ) | ||
132 | |||
133 | #-------------------------------------------------------------------------# | ||
134 | class ThreadActivityWindow( DecoratedWindow ): | ||
135 | #-------------------------------------------------------------------------# | ||
136 | """Thread Activity Window""" | ||
137 | def __init__( self, x, y, width, height ): | ||
138 | NCursesUI.DecoratedWindow.__init__( self, "Thread Activity", x, y, width, height ) | ||
139 | |||
140 | def setStatus( self, thread, text ): | ||
141 | line = "%02d: %s" % ( thread, text ) | ||
142 | width = self.dimensions[WIDTH] | ||
143 | if ( len(line) > width ): | ||
144 | line = line[:width-3] + "..." | ||
145 | else: | ||
146 | line = line.ljust( width ) | ||
147 | self.setText( 0, thread, line ) | ||
148 | |||
149 | #-------------------------------------------------------------------------# | ||
150 | class MainWindow( DecoratedWindow ): | ||
151 | #-------------------------------------------------------------------------# | ||
152 | """Main Window""" | ||
153 | def __init__( self, x, y, width, height ): | ||
154 | self.StatusPosition = width - MAXSTATUSLENGTH | ||
155 | NCursesUI.DecoratedWindow.__init__( self, None, x, y, width, height ) | ||
156 | curses.nl() | ||
157 | |||
158 | def setTitle( self, title ): | ||
159 | title = "BitBake %s" % bb.__version__ | ||
160 | self.decoration.setText( 2, 1, title, curses.A_BOLD ) | ||
161 | self.decoration.setText( self.StatusPosition - 8, 1, "Status:", curses.A_BOLD ) | ||
162 | |||
163 | def setStatus(self, status): | ||
164 | while len(status) < MAXSTATUSLENGTH: | ||
165 | status = status + " " | ||
166 | self.decoration.setText( self.StatusPosition, 1, status, curses.A_BOLD ) | ||
167 | |||
168 | |||
169 | #-------------------------------------------------------------------------# | ||
170 | class ShellOutputWindow( DecoratedWindow ): | ||
171 | #-------------------------------------------------------------------------# | ||
172 | """Interactive Command Line Output""" | ||
173 | def __init__( self, x, y, width, height ): | ||
174 | NCursesUI.DecoratedWindow.__init__( self, "Command Line Window", x, y, width, height ) | ||
175 | |||
176 | #-------------------------------------------------------------------------# | ||
177 | class ShellInputWindow( Window ): | ||
178 | #-------------------------------------------------------------------------# | ||
179 | """Interactive Command Line Input""" | ||
180 | def __init__( self, x, y, width, height ): | ||
181 | NCursesUI.Window.__init__( self, x, y, width, height ) | ||
182 | |||
183 | # put that to the top again from curses.textpad import Textbox | ||
184 | # self.textbox = Textbox( self.win ) | ||
185 | # t = threading.Thread() | ||
186 | # t.run = self.textbox.edit | ||
187 | # t.start() | ||
188 | |||
189 | #-------------------------------------------------------------------------# | ||
190 | def main(self, stdscr, server, eventHandler): | ||
191 | #-------------------------------------------------------------------------# | ||
192 | height, width = stdscr.getmaxyx() | ||
193 | |||
194 | # for now split it like that: | ||
195 | # MAIN_y + THREAD_y = 2/3 screen at the top | ||
196 | # MAIN_x = 2/3 left, THREAD_y = 1/3 right | ||
197 | # CLI_y = 1/3 of screen at the bottom | ||
198 | # CLI_x = full | ||
199 | |||
200 | main_left = 0 | ||
201 | main_top = 0 | ||
202 | main_height = ( height / 3 * 2 ) | ||
203 | main_width = ( width / 3 ) * 2 | ||
204 | clo_left = main_left | ||
205 | clo_top = main_top + main_height | ||
206 | clo_height = height - main_height - main_top - 1 | ||
207 | clo_width = width | ||
208 | cli_left = main_left | ||
209 | cli_top = clo_top + clo_height | ||
210 | cli_height = 1 | ||
211 | cli_width = width | ||
212 | thread_left = main_left + main_width | ||
213 | thread_top = main_top | ||
214 | thread_height = main_height | ||
215 | thread_width = width - main_width | ||
216 | |||
217 | #tw = self.TitleWindow( 0, 0, width, main_top ) | ||
218 | mw = self.MainWindow( main_left, main_top, main_width, main_height ) | ||
219 | taw = self.ThreadActivityWindow( thread_left, thread_top, thread_width, thread_height ) | ||
220 | clo = self.ShellOutputWindow( clo_left, clo_top, clo_width, clo_height ) | ||
221 | cli = self.ShellInputWindow( cli_left, cli_top, cli_width, cli_height ) | ||
222 | cli.setText( 0, 0, "BB>" ) | ||
223 | |||
224 | mw.setStatus("Idle") | ||
225 | |||
226 | helper = uihelper.BBUIHelper() | ||
227 | shutdown = 0 | ||
228 | |||
229 | try: | ||
230 | cmdline = server.runCommand(["getCmdLineAction"]) | ||
231 | if not cmdline: | ||
232 | return | ||
233 | ret = server.runCommand(cmdline) | ||
234 | if ret != True: | ||
235 | print "Couldn't get default commandlind! %s" % ret | ||
236 | return | ||
237 | except xmlrpclib.Fault, x: | ||
238 | print "XMLRPC Fault getting commandline:\n %s" % x | ||
239 | return | ||
240 | |||
241 | exitflag = False | ||
242 | while not exitflag: | ||
243 | try: | ||
244 | event = eventHandler.waitEvent(0.25) | ||
245 | if not event: | ||
246 | continue | ||
247 | helper.eventHandler(event) | ||
248 | #mw.appendText("%s\n" % event[0]) | ||
249 | if isinstance(event, bb.build.Task): | ||
250 | mw.appendText("NOTE: %s\n" % event._message) | ||
251 | if isinstance(event, bb.msg.MsgDebug): | ||
252 | mw.appendText('DEBUG: ' + event._message + '\n') | ||
253 | if isinstance(event, bb.msg.MsgNote): | ||
254 | mw.appendText('NOTE: ' + event._message + '\n') | ||
255 | if isinstance(event, bb.msg.MsgWarn): | ||
256 | mw.appendText('WARNING: ' + event._message + '\n') | ||
257 | if isinstance(event, bb.msg.MsgError): | ||
258 | mw.appendText('ERROR: ' + event._message + '\n') | ||
259 | if isinstance(event, bb.msg.MsgFatal): | ||
260 | mw.appendText('FATAL: ' + event._message + '\n') | ||
261 | if isinstance(event, bb.event.ParseProgress): | ||
262 | x = event.sofar | ||
263 | y = event.total | ||
264 | if x == y: | ||
265 | mw.setStatus("Idle") | ||
266 | mw.appendText("Parsing finished. %d cached, %d parsed, %d skipped, %d masked." | ||
267 | % ( event.cached, event.parsed, event.skipped, event.masked )) | ||
268 | else: | ||
269 | mw.setStatus("Parsing: %s (%04d/%04d) [%2d %%]" % ( parsespin.next(), x, y, x*100/y ) ) | ||
270 | # if isinstance(event, bb.build.TaskFailed): | ||
271 | # if event.logfile: | ||
272 | # if data.getVar("BBINCLUDELOGS", d): | ||
273 | # bb.msg.error(bb.msg.domain.Build, "log data follows (%s)" % logfile) | ||
274 | # number_of_lines = data.getVar("BBINCLUDELOGS_LINES", d) | ||
275 | # if number_of_lines: | ||
276 | # os.system('tail -n%s %s' % (number_of_lines, logfile)) | ||
277 | # else: | ||
278 | # f = open(logfile, "r") | ||
279 | # while True: | ||
280 | # l = f.readline() | ||
281 | # if l == '': | ||
282 | # break | ||
283 | # l = l.rstrip() | ||
284 | # print '| %s' % l | ||
285 | # f.close() | ||
286 | # else: | ||
287 | # bb.msg.error(bb.msg.domain.Build, "see log in %s" % logfile) | ||
288 | |||
289 | if isinstance(event, bb.command.CookerCommandCompleted): | ||
290 | exitflag = True | ||
291 | if isinstance(event, bb.command.CookerCommandFailed): | ||
292 | mw.appendText("Command execution failed: %s" % event.error) | ||
293 | time.sleep(2) | ||
294 | exitflag = True | ||
295 | if isinstance(event, bb.cooker.CookerExit): | ||
296 | exitflag = True | ||
297 | |||
298 | if helper.needUpdate: | ||
299 | activetasks, failedtasks = helper.getTasks() | ||
300 | taw.erase() | ||
301 | taw.setText(0, 0, "") | ||
302 | if activetasks: | ||
303 | taw.appendText("Active Tasks:\n") | ||
304 | for task in activetasks: | ||
305 | taw.appendText(task) | ||
306 | if failedtasks: | ||
307 | taw.appendText("Failed Tasks:\n") | ||
308 | for task in failedtasks: | ||
309 | taw.appendText(task) | ||
310 | |||
311 | curses.doupdate() | ||
312 | except KeyboardInterrupt: | ||
313 | if shutdown == 2: | ||
314 | mw.appendText("Third Keyboard Interrupt, exit.\n") | ||
315 | exitflag = True | ||
316 | if shutdown == 1: | ||
317 | mw.appendText("Second Keyboard Interrupt, stopping...\n") | ||
318 | server.runCommand(["stateStop"]) | ||
319 | if shutdown == 0: | ||
320 | mw.appendText("Keyboard Interrupt, closing down...\n") | ||
321 | server.runCommand(["stateShutdown"]) | ||
322 | shutdown = shutdown + 1 | ||
323 | pass | ||
324 | |||
325 | def init(server, eventHandler): | ||
326 | if not os.isatty(sys.stdout.fileno()): | ||
327 | print "FATAL: Unable to run 'ncurses' UI without a TTY." | ||
328 | return | ||
329 | ui = NCursesUI() | ||
330 | try: | ||
331 | curses.wrapper(ui.main, server, eventHandler) | ||
332 | except: | ||
333 | import traceback | ||
334 | traceback.print_exc() | ||
335 | |||
diff --git a/bitbake-dev/lib/bb/ui/puccho.py b/bitbake-dev/lib/bb/ui/puccho.py deleted file mode 100644 index 713aa1f4a6..0000000000 --- a/bitbake-dev/lib/bb/ui/puccho.py +++ /dev/null | |||
@@ -1,425 +0,0 @@ | |||
1 | # | ||
2 | # BitBake Graphical GTK User Interface | ||
3 | # | ||
4 | # Copyright (C) 2008 Intel Corporation | ||
5 | # | ||
6 | # Authored by Rob Bradford <rob@linux.intel.com> | ||
7 | # | ||
8 | # This program is free software; you can redistribute it and/or modify | ||
9 | # it under the terms of the GNU General Public License version 2 as | ||
10 | # published by the Free Software Foundation. | ||
11 | # | ||
12 | # This program is distributed in the hope that it will be useful, | ||
13 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
14 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
15 | # GNU General Public License for more details. | ||
16 | # | ||
17 | # You should have received a copy of the GNU General Public License along | ||
18 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
19 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
20 | |||
21 | import gtk | ||
22 | import gobject | ||
23 | import gtk.glade | ||
24 | import threading | ||
25 | import urllib2 | ||
26 | import os | ||
27 | |||
28 | from bb.ui.crumbs.buildmanager import BuildManager, BuildConfiguration | ||
29 | from bb.ui.crumbs.buildmanager import BuildManagerTreeView | ||
30 | |||
31 | from bb.ui.crumbs.runningbuild import RunningBuild, RunningBuildTreeView | ||
32 | |||
33 | # The metadata loader is used by the BuildSetupDialog to download the | ||
34 | # available options to populate the dialog | ||
class MetaDataLoader(gobject.GObject):
    """ This class provides the mechanism for loading the metadata (the
    fetching and parsing) from a given URL. The metadata encompasses details
    on what machines are available. The distribution and images available for
    the machine and the uris to use for building the given machine."""

    # Signals emitted when the background LoaderThread finishes:
    # "success" carries no arguments, "error" carries a human-readable
    # description of what went wrong.
    __gsignals__ = {
        'success' : (gobject.SIGNAL_RUN_LAST,
                     gobject.TYPE_NONE,
                     ()),
        'error' : (gobject.SIGNAL_RUN_LAST,
                   gobject.TYPE_NONE,
                   (gobject.TYPE_STRING,))
        }

    # We use these little helper functions to ensure that we take the gdk lock
    # when emitting the signal. These functions are called as idles (so that
    # they happen in the gtk / main thread's main loop.
    def emit_error_signal (self, remark):
        # remark: message string forwarded to the "error" signal handlers.
        gtk.gdk.threads_enter()
        self.emit ("error", remark)
        gtk.gdk.threads_leave()

    def emit_success_signal (self):
        gtk.gdk.threads_enter()
        self.emit ("success")
        gtk.gdk.threads_leave()

    def __init__ (self):
        gobject.GObject.__init__ (self)

    class LoaderThread(threading.Thread):
        """ This class provides an asynchronous loader for the metadata (by
        using threads and signals). This is useful since the metadata may be
        at a remote URL."""

        class LoaderImportException (Exception):
            # Raised internally when a metadata line does not have the
            # expected number of ';'-separated fields.
            pass

        def __init__(self, loader, url):
            # loader: the owning MetaDataLoader; url: where to fetch from.
            threading.Thread.__init__ (self)
            self.url = url
            self.loader = loader

        def run (self):
            result = {}
            try:
                f = urllib2.urlopen (self.url)

                # Parse the metadata format. The format is....
                # <machine>;<default distro>|<distro>...;<default image>|<image>...;<type##url>|...
                for line in f.readlines():
                    components = line.split(";")
                    if (len (components) < 4):
                        raise MetaDataLoader.LoaderThread.LoaderImportException
                    machine = components[0]
                    distros = components[1].split("|")
                    images = components[2].split("|")
                    urls = components[3].split("|")

                    result[machine] = (distros, images, urls)

                # Create an object representing this *potential*
                # configuration. It can become concrete if the machine, distro
                # and image are all chosen in the UI
                configuration = BuildConfiguration()
                configuration.metadata_url = self.url
                configuration.machine_options = result
                self.loader.configuration = configuration

                # Emit that we've actually got a configuration
                # (via idle_add so the signal fires on the GTK main loop).
                gobject.idle_add (MetaDataLoader.emit_success_signal,
                    self.loader)

            except MetaDataLoader.LoaderThread.LoaderImportException, e:
                gobject.idle_add (MetaDataLoader.emit_error_signal, self.loader,
                    "Repository metadata corrupt")
            except Exception, e:
                gobject.idle_add (MetaDataLoader.emit_error_signal, self.loader,
                    "Unable to download repository metadata")
                print e

    def try_fetch_from_url (self, url):
        # Try and download the metadata. Firing a signal if successful
        thread = MetaDataLoader.LoaderThread(self, url)
        thread.start()
119 | |||
class BuildSetupDialog (gtk.Dialog):
    """Dialog that lets the user enter a repository URL and then choose a
    machine, distribution and image (populated from downloaded metadata)
    before starting a build."""

    # Response id returned by the dialog when the user presses "Build".
    RESPONSE_BUILD = 1

    # A little helper method that just sets the states on the widgets based on
    # whether we've got good metadata or not.
    def set_configurable (self, configurable):
        if (self.configurable == configurable):
            return

        self.configurable = configurable
        for widget in self.conf_widgets:
            widget.set_sensitive (configurable)

        # Clear the combo selections when becoming non-configurable so no
        # stale choice remains visible.
        if not configurable:
            self.machine_combo.set_active (-1)
            self.distribution_combo.set_active (-1)
            self.image_combo.set_active (-1)

    # GTK widget callbacks
    def refresh_button_clicked (self, button):
        # Refresh button clicked.

        url = self.location_entry.get_chars (0, -1)
        self.loader.try_fetch_from_url(url)

    def repository_entry_editable_changed (self, entry):
        # The refresh button is only usable once the entry has some text.
        if (len (entry.get_chars (0, -1)) > 0):
            self.refresh_button.set_sensitive (True)
        else:
            self.refresh_button.set_sensitive (False)
            self.clear_status_message()

        # If we were previously configurable we are no longer since the
        # location entry has been changed
        self.set_configurable (False)

    def machine_combo_changed (self, combobox):
        # When the machine changes, swap in the distro/image models that
        # belong to the chosen machine.
        active_iter = combobox.get_active_iter()

        if not active_iter:
            return

        model = combobox.get_model()

        if model:
            chosen_machine = model.get (active_iter, 0)[0]

            (distros_model, images_model) = \
                self.loader.configuration.get_distro_and_images_models (chosen_machine)

            self.distribution_combo.set_model (distros_model)
            self.image_combo.set_model (images_model)

    # Callbacks from the loader
    def loader_success_cb (self, loader):
        self.status_image.set_from_icon_name ("info",
            gtk.ICON_SIZE_BUTTON)
        self.status_image.show()
        self.status_label.set_label ("Repository metadata successfully downloaded")

        # Set the models on the combo boxes based on the models generated from
        # the configuration that the loader has created

        # We just need to set the machine here, that then determines the
        # distro and image options. Cunning huh? :-)

        self.configuration = self.loader.configuration
        model = self.configuration.get_machines_model ()
        self.machine_combo.set_model (model)

        self.set_configurable (True)

    def loader_error_cb (self, loader, message):
        self.status_image.set_from_icon_name ("error",
            gtk.ICON_SIZE_BUTTON)
        self.status_image.show()
        self.status_label.set_text ("Error downloading repository metadata")
        for widget in self.conf_widgets:
            widget.set_sensitive (False)

    def clear_status_message (self):
        # Restore the default hint text and hide any status icon.
        self.status_image.hide()
        self.status_label.set_label (
            """<i>Enter the repository location and press _Refresh</i>""")

    def __init__ (self):
        gtk.Dialog.__init__ (self)

        # Cancel
        self.add_button (gtk.STOCK_CANCEL, gtk.RESPONSE_CANCEL)

        # Build
        button = gtk.Button ("_Build", None, True)
        image = gtk.Image ()
        image.set_from_stock (gtk.STOCK_EXECUTE,gtk.ICON_SIZE_BUTTON)
        button.set_image (image)
        self.add_action_widget (button, BuildSetupDialog.RESPONSE_BUILD)
        button.show_all ()

        # Pull in *just* the table from the Glade XML data.
        gxml = gtk.glade.XML (os.path.dirname(__file__) + "/crumbs/puccho.glade",
            root = "build_table")
        table = gxml.get_widget ("build_table")
        self.vbox.pack_start (table, True, False, 0)

        # Grab all the widgets that we need to turn on/off when we refresh...
        self.conf_widgets = []
        self.conf_widgets += [gxml.get_widget ("machine_label")]
        self.conf_widgets += [gxml.get_widget ("distribution_label")]
        self.conf_widgets += [gxml.get_widget ("image_label")]
        self.conf_widgets += [gxml.get_widget ("machine_combo")]
        self.conf_widgets += [gxml.get_widget ("distribution_combo")]
        self.conf_widgets += [gxml.get_widget ("image_combo")]

        # Grab the status widgets
        self.status_image = gxml.get_widget ("status_image")
        self.status_label = gxml.get_widget ("status_label")

        # Grab the refresh button and connect to the clicked signal
        self.refresh_button = gxml.get_widget ("refresh_button")
        self.refresh_button.connect ("clicked", self.refresh_button_clicked)

        # Grab the location entry and connect to editable::changed
        self.location_entry = gxml.get_widget ("location_entry")
        self.location_entry.connect ("changed",
            self.repository_entry_editable_changed)

        # Grab the machine combo and hook onto the changed signal. This then
        # allows us to populate the distro and image combos
        self.machine_combo = gxml.get_widget ("machine_combo")
        self.machine_combo.connect ("changed", self.machine_combo_changed)

        # Setup the combo
        cell = gtk.CellRendererText()
        self.machine_combo.pack_start(cell, True)
        self.machine_combo.add_attribute(cell, 'text', 0)

        # Grab the distro and image combos. We need these to populate with
        # models once the machine is chosen
        self.distribution_combo = gxml.get_widget ("distribution_combo")
        cell = gtk.CellRendererText()
        self.distribution_combo.pack_start(cell, True)
        self.distribution_combo.add_attribute(cell, 'text', 0)

        self.image_combo = gxml.get_widget ("image_combo")
        cell = gtk.CellRendererText()
        self.image_combo.pack_start(cell, True)
        self.image_combo.add_attribute(cell, 'text', 0)

        # Put the default descriptive text in the status box
        self.clear_status_message()

        # Mark as non-configurable, this is just greys out the widgets the
        # user can't yet use
        self.configurable = False
        self.set_configurable(False)

        # Show the table
        table.show_all ()

        # The loader and some signals connected to it to update the status
        # area
        self.loader = MetaDataLoader()
        self.loader.connect ("success", self.loader_success_cb)
        self.loader.connect ("error", self.loader_error_cb)

    def update_configuration (self):
        """ A poorly named function but it updates the internal configuration
        from the widgets. This can make that configuration concrete and can
        thus be used for building """
        # Extract the chosen machine from the combo
        model = self.machine_combo.get_model()
        active_iter = self.machine_combo.get_active_iter()
        if (active_iter):
            self.configuration.machine = model.get(active_iter, 0)[0]

        # Extract the chosen distro from the combo
        model = self.distribution_combo.get_model()
        active_iter = self.distribution_combo.get_active_iter()
        if (active_iter):
            self.configuration.distro = model.get(active_iter, 0)[0]

        # Extract the chosen image from the combo
        model = self.image_combo.get_model()
        active_iter = self.image_combo.get_active_iter()
        if (active_iter):
            self.configuration.image = model.get(active_iter, 0)[0]
307 | |||
308 | # This function operates to pull events out from the event queue and then push | ||
309 | # them into the RunningBuild (which then drives the RunningBuild which then | ||
310 | # pushes through and updates the progress tree view.) | ||
311 | # | ||
312 | # TODO: Should be a method on the RunningBuild class | ||
def event_handle_timeout (eventHandler, build):
    """Drain every pending server event into the RunningBuild.

    Installed as a gobject timeout; returning True re-arms the timeout so
    polling continues.
    """
    while True:
        event = eventHandler.getEvent()
        if not event:
            break
        build.handle_event (event)
    return True
320 | |||
class MainWindow (gtk.Window):
    """Top-level puccho window: holds the build-manager view (past builds)
    and the running-build view (current progress)."""

    # Callback that gets fired when the user hits a button in the
    # BuildSetupDialog.
    def build_dialog_box_response_cb (self, dialog, response_id):
        conf = None
        if (response_id == BuildSetupDialog.RESPONSE_BUILD):
            dialog.update_configuration()
            print dialog.configuration.machine, dialog.configuration.distro, \
                dialog.configuration.image
            conf = dialog.configuration

        dialog.destroy()

        # Only start a build when the user pressed Build (not Cancel).
        if conf:
            self.manager.do_build (conf)

    def build_button_clicked_cb (self, button):
        dialog = BuildSetupDialog ()

        # For some unknown reason Dialog.run causes nice little deadlocks ... :-(
        dialog.connect ("response", self.build_dialog_box_response_cb)
        dialog.show()

    def __init__ (self):
        gtk.Window.__init__ (self)

        # Pull in *just* the main vbox from the Glade XML data and then pack
        # that inside the window
        gxml = gtk.glade.XML (os.path.dirname(__file__) + "/crumbs/puccho.glade",
            root = "main_window_vbox")
        vbox = gxml.get_widget ("main_window_vbox")
        self.add (vbox)

        # Create the tree views for the build manager view and the progress view
        self.build_manager_view = BuildManagerTreeView()
        self.running_build_view = RunningBuildTreeView()

        # Grab the scrolled windows that we put the tree views into
        self.results_scrolledwindow = gxml.get_widget ("results_scrolledwindow")
        self.progress_scrolledwindow = gxml.get_widget ("progress_scrolledwindow")

        # Put the tree views inside ...
        self.results_scrolledwindow.add (self.build_manager_view)
        self.progress_scrolledwindow.add (self.running_build_view)

        # Hook up the build button...
        self.build_button = gxml.get_widget ("main_toolbutton_build")
        self.build_button.connect ("clicked", self.build_button_clicked_cb)
370 | |||
371 | # I'm not very happy about the current ownership of the RunningBuild. I have | ||
372 | # my suspicions that this object should be held by the BuildManager since we | ||
373 | # care about the signals in the manager | ||
374 | |||
def running_build_succeeded_cb (running_build, manager):
    """RunningBuild "build-succeeded" handler: tells the BuildManager so it
    can turn the in-progress row into a completed-build row."""
    # Notify the manager that a build has succeeded. This is necessary as part
    # of the 'hack' that we use for making the row in the model / view
    # representing the ongoing build change into a row representing the
    # completed build. Since we know only one build can be running a time then
    # we can handle this.

    # FIXME: Refactor all this so that the RunningBuild is owned by the
    # BuildManager. It can then hook onto the signals directly and drive
    # interesting things it cares about.
    manager.notify_build_succeeded ()
    print "build succeeded"
387 | |||
def running_build_failed_cb (running_build, manager):
    """RunningBuild "build-failed" handler; see running_build_succeeded_cb."""
    # As above
    print "build failed"
    manager.notify_build_failed ()
392 | |||
def init (server, eventHandler):
    """Entry point for the GTK ("puccho") UI.

    Builds the main window, wires the build manager and running-build
    views to their models, and runs the GTK main loop.
    """
    # Threading must be initialised first: the metadata loader and build
    # machinery run on background threads.
    gobject.threads_init()
    gtk.gdk.threads_init()

    window = MainWindow ()
    window.show_all ()

    # General build-manager setup; completed builds land under ./results.
    results_path = os.path.join (os.getcwd(), "results")
    build_manager = BuildManager (server, results_path)
    window.build_manager_view.set_model (build_manager.model)

    # Running-build setup: the progress view tracks the RunningBuild model.
    current_build = RunningBuild ()
    window.running_build_view.set_model (current_build.model)
    current_build.connect ("build-succeeded", running_build_succeeded_cb,
        build_manager)
    current_build.connect ("build-failed", running_build_failed_cb,
        build_manager)

    # The toolbar Build button needs the manager, so stash it on the window.
    # FIXME: Refactor ?
    window.manager = build_manager

    # Poll the server's event queue five times a second, feeding anything
    # found into the RunningBuild.
    gobject.timeout_add (200,
        event_handle_timeout,
        eventHandler,
        current_build)

    gtk.main()
diff --git a/bitbake-dev/lib/bb/ui/uievent.py b/bitbake-dev/lib/bb/ui/uievent.py deleted file mode 100644 index 36302f4da7..0000000000 --- a/bitbake-dev/lib/bb/ui/uievent.py +++ /dev/null | |||
@@ -1,125 +0,0 @@ | |||
1 | # ex:ts=4:sw=4:sts=4:et | ||
2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
3 | # | ||
4 | # Copyright (C) 2006 - 2007 Michael 'Mickey' Lauer | ||
5 | # Copyright (C) 2006 - 2007 Richard Purdie | ||
6 | # | ||
7 | # This program is free software; you can redistribute it and/or modify | ||
8 | # it under the terms of the GNU General Public License version 2 as | ||
9 | # published by the Free Software Foundation. | ||
10 | # | ||
11 | # This program is distributed in the hope that it will be useful, | ||
12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | # GNU General Public License for more details. | ||
15 | # | ||
16 | # You should have received a copy of the GNU General Public License along | ||
17 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
18 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
19 | |||
20 | |||
21 | """ | ||
Use this class to fork off a thread to receive event callbacks from the bitbake
23 | server and queue them for the UI to process. This process must be used to avoid | ||
24 | client/server deadlocks. | ||
25 | """ | ||
26 | |||
27 | import socket, threading, pickle | ||
28 | from SimpleXMLRPCServer import SimpleXMLRPCServer, SimpleXMLRPCRequestHandler | ||
29 | |||
class BBUIEventQueue:
    """Client-side queue of events sent by the bitbake server.

    A daemon thread runs a small XML-RPC server (UIXMLRPCServer) that the
    bitbake server calls back into; received events are unpickled and
    queued for the UI to consume via getEvent()/waitEvent().
    """

    def __init__(self, BBServer):

        self.eventQueue = []
        self.eventQueueLock = threading.Lock()
        # Set while the queue is non-empty, so waitEvent() can block.
        self.eventQueueNotify = threading.Event()

        self.BBServer = BBServer

        # Daemon thread so the callback handler never blocks shutdown.
        self.t = threading.Thread()
        self.t.setDaemon(True)
        self.t.run = self.startCallbackHandler
        self.t.start()

    def getEvent(self):
        """Pop and return the oldest queued event, or None if the queue
        is empty. Clears the notify flag once the queue drains."""
        self.eventQueueLock.acquire()
        # try/finally guarantees the lock is released even if pop() or the
        # Event manipulation raises; the previous code leaked the lock in
        # that case, deadlocking every later caller.
        try:
            if len(self.eventQueue) == 0:
                return None

            item = self.eventQueue.pop(0)

            if len(self.eventQueue) == 0:
                self.eventQueueNotify.clear()

            return item
        finally:
            self.eventQueueLock.release()

    def waitEvent(self, delay):
        """Wait up to *delay* seconds for an event, then return
        getEvent()'s result (None on timeout)."""
        self.eventQueueNotify.wait(delay)
        return self.getEvent()

    def queue_event(self, event):
        """XML-RPC callback ("event.send"): unpickle *event* and queue it."""
        self.eventQueueLock.acquire()
        try:
            self.eventQueue.append(pickle.loads(event))
            self.eventQueueNotify.set()
        finally:
            self.eventQueueLock.release()

    def startCallbackHandler(self):
        """Thread body: serve XML-RPC callback requests until
        system_quit() flips the server's quit flag."""
        server = UIXMLRPCServer()
        self.host, self.port = server.socket.getsockname()

        server.register_function( self.system_quit, "event.quit" )
        server.register_function( self.queue_event, "event.send" )
        # Short accept timeout so the loop re-checks the quit flag.
        server.socket.settimeout(1)

        self.EventHandle = self.BBServer.registerEventHandler(self.host, self.port)

        self.server = server
        while not server.quit:
            server.handle_request()
        server.server_close()

    def system_quit( self ):
        """
        Shut down the callback thread
        """
        try:
            self.BBServer.unregisterEventHandler(self.EventHandle)
        except:
            # Best effort: the server side may already be gone at shutdown.
            pass
        self.server.quit = True
95 | |||
class UIXMLRPCServer (SimpleXMLRPCServer):
    """XML-RPC server with a cooperative shutdown flag: accept() is polled
    with a one-second timeout so the serving loop can notice `quit`."""

    def __init__( self, interface = ("localhost", 0) ):
        # Bind to an ephemeral localhost port by default.
        self.quit = False
        SimpleXMLRPCServer.__init__( self,
                                     interface,
                                     requestHandler=SimpleXMLRPCRequestHandler,
                                     logRequests=False, allow_none=True)

    def get_request(self):
        # Keep retrying accept() until a connection arrives or quit is set.
        while True:
            if self.quit:
                return (None, None)
            try:
                conn, address = self.socket.accept()
            except socket.timeout:
                continue
            conn.settimeout(1)
            return (conn, address)

    def close_request(self, request):
        # get_request() returns None when shutting down; nothing to close.
        if request is not None:
            SimpleXMLRPCServer.close_request(self, request)

    def process_request(self, request, client_address):
        # Skip the dummy (None) request produced during shutdown.
        if request is not None:
            SimpleXMLRPCServer.process_request(self, request, client_address)
124 | |||
125 | |||
diff --git a/bitbake-dev/lib/bb/ui/uihelper.py b/bitbake-dev/lib/bb/ui/uihelper.py deleted file mode 100644 index 151ffc5854..0000000000 --- a/bitbake-dev/lib/bb/ui/uihelper.py +++ /dev/null | |||
@@ -1,49 +0,0 @@ | |||
1 | # ex:ts=4:sw=4:sts=4:et | ||
2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
3 | # | ||
4 | # Copyright (C) 2006 - 2007 Michael 'Mickey' Lauer | ||
5 | # Copyright (C) 2006 - 2007 Richard Purdie | ||
6 | # | ||
7 | # This program is free software; you can redistribute it and/or modify | ||
8 | # it under the terms of the GNU General Public License version 2 as | ||
9 | # published by the Free Software Foundation. | ||
10 | # | ||
11 | # This program is distributed in the hope that it will be useful, | ||
12 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
13 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
14 | # GNU General Public License for more details. | ||
15 | # | ||
16 | # You should have received a copy of the GNU General Public License along | ||
17 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
18 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
19 | |||
class BBUIHelper:
    """Tracks the sets of running and failed tasks for a UI, driven by
    bb.build task events passed to eventHandler()."""

    def __init__(self):
        # Flag the UI polls to know a redraw is needed.
        self.needUpdate = False
        # Both dicts map "package task\n" strings to "" (used as sets).
        self.running_tasks = {}
        self.failed_tasks = {}

    def eventHandler(self, event):
        # NOTE(review): `bb` is referenced here but no import of it is
        # visible in this file -- confirm it is in scope at runtime.
        if isinstance(event, bb.build.TaskStarted):
            self.running_tasks["%s %s\n" % (event._package, event._task)] = ""
            self.needUpdate = True
        if isinstance(event, bb.build.TaskSucceeded):
            del self.running_tasks["%s %s\n" % (event._package, event._task)]
            self.needUpdate = True
        if isinstance(event, bb.build.TaskFailed):
            # A failed task is moved from the running set to the failed set.
            del self.running_tasks["%s %s\n" % (event._package, event._task)]
            self.failed_tasks["%s %s\n" % (event._package, event._task)] = ""
            self.needUpdate = True

        # Add runqueue event handling
        #if isinstance(event, bb.runqueue.runQueueTaskCompleted):
        #    a = 1
        #if isinstance(event, bb.runqueue.runQueueTaskStarted):
        #    a = 1
        #if isinstance(event, bb.runqueue.runQueueTaskFailed):
        #    a = 1
        #if isinstance(event, bb.runqueue.runQueueExitWait):
        #    a = 1

    def getTasks(self):
        # Returns the (running, failed) task dictionaries described above.
        return (self.running_tasks, self.failed_tasks)
diff --git a/bitbake-dev/lib/bb/utils.py b/bitbake-dev/lib/bb/utils.py deleted file mode 100644 index 5fc1463e67..0000000000 --- a/bitbake-dev/lib/bb/utils.py +++ /dev/null | |||
@@ -1,431 +0,0 @@ | |||
1 | # ex:ts=4:sw=4:sts=4:et | ||
2 | # -*- tab-width: 4; c-basic-offset: 4; indent-tabs-mode: nil -*- | ||
3 | """ | ||
4 | BitBake Utility Functions | ||
5 | """ | ||
6 | |||
7 | # Copyright (C) 2004 Michael Lauer | ||
8 | # | ||
9 | # This program is free software; you can redistribute it and/or modify | ||
10 | # it under the terms of the GNU General Public License version 2 as | ||
11 | # published by the Free Software Foundation. | ||
12 | # | ||
13 | # This program is distributed in the hope that it will be useful, | ||
14 | # but WITHOUT ANY WARRANTY; without even the implied warranty of | ||
15 | # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the | ||
16 | # GNU General Public License for more details. | ||
17 | # | ||
18 | # You should have received a copy of the GNU General Public License along | ||
19 | # with this program; if not, write to the Free Software Foundation, Inc., | ||
20 | # 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301 USA. | ||
21 | |||
# Character classes used by explode_version() when tokenising a version
# string: runs of digits become ints, runs of letters become strings.
digits = "0123456789"
ascii_letters = "abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ"
# Characters vercmp_part() treats as separators (sort before other parts).
separators = ".-"
25 | |||
26 | import re, fcntl, os, types | ||
27 | |||
def explode_version(s):
    """
    Break a version string into a list of comparable components.

    Runs of ASCII digits become ints, runs of ASCII letters become
    strings, and every other character is kept as a single-character
    string, e.g. "1.0-rc2" -> [1, '.', 0, '-', 'rc', 2].
    """
    r = []
    # Raw strings: the original '^(\d+)(.*)$' contained an invalid string
    # escape, which newer interpreters warn about.
    alpha_regexp = re.compile(r'^([a-zA-Z]+)(.*)$')
    numeric_regexp = re.compile(r'^(\d+)(.*)$')
    while s != '':
        # A leading digit run is collected as one numeric component.
        m = numeric_regexp.match(s)
        if m:
            r.append(int(m.group(1)))
            s = m.group(2)
            continue
        # A leading letter run is collected as one string component.
        m = alpha_regexp.match(s)
        if m:
            r.append(m.group(1))
            s = m.group(2)
            continue
        # Anything else (separators etc.) stands alone as one character.
        r.append(s[0])
        s = s[1:]
    return r
46 | |||
def vercmp_part(a, b):
    """Compare two version-part strings component-by-component.

    Returns 1 if a > b, -1 if a < b, 0 if equal. Components come from
    explode_version(); a separator character always compares lower than
    a non-separator component.
    """
    va = explode_version(a)
    vb = explode_version(b)
    sa = False
    sb = False
    while True:
        # Exhausted lists yield None, which (in Python 2) orders below
        # any remaining component of the other version.
        if va == []:
            ca = None
        else:
            ca = va.pop(0)
        if vb == []:
            cb = None
        else:
            cb = vb.pop(0)
        if ca == None and cb == None:
            return 0

        # types.StringType is Python-2-only (it is str); components that
        # are separator characters sort before everything else.
        if type(ca) is types.StringType:
            sa = ca in separators
        if type(cb) is types.StringType:
            sb = cb in separators
        if sa and not sb:
            return -1
        if not sa and sb:
            return 1

        # NOTE(review): relies on Python 2's ordering of mixed int/str/None
        # values; this comparison raises TypeError on Python 3.
        if ca > cb:
            return 1
        if ca < cb:
            return -1
77 | |||
def vercmp(ta, tb):
    """Compare two (epoch, version, revision) tuples.

    Returns a positive number if ta > tb, negative if ta < tb, zero if
    equal. Epoch is compared numerically first, then version, then
    revision via vercmp_part().
    """
    (ea, va, ra) = ta
    (eb, vb, rb) = tb

    result = int(ea) - int(eb)
    if result == 0:
        result = vercmp_part(va, vb)
    if result == 0:
        result = vercmp_part(ra, rb)
    return result
88 | |||
def explode_deps(s):
    """
    Take an RDEPENDS style string of format:
    "DEPEND1 (optional version) DEPEND2 (optional version) ..."
    and return a list of dependencies.
    Version information is ignored.
    """
    deps = []
    in_version = False
    for token in s.split():
        # A '(' opens a version constraint; everything up to the token
        # ending in ')' is skipped.
        if token[0] == '(':
            in_version = True
        if not in_version:
            deps.append(token)
        if in_version and token.endswith(')'):
            in_version = False
    return deps
112 | |||
def explode_dep_versions(s):
    """
    Take an RDEPENDS style string of format:
    "DEPEND1 (optional version) DEPEND2 (optional version) ..."
    and return a dictionary mapping each dependency to its version
    constraint string (or None when no constraint was given).
    """
    versions = {}
    current_dep = None
    current_ver = ""
    in_version = False
    for token in s.split():
        if token[0] == '(':
            # Opens a version constraint; strip the leading '('.
            in_version = True
            current_ver = token[1:] or ""
        elif in_version and token.endswith(')'):
            # Closes the constraint; strip the ')' and record it against
            # the most recently seen dependency.
            in_version = False
            current_ver = current_ver + " " + (token[:-1] or "")
            versions[current_dep] = current_ver
        elif not in_version:
            # Plain dependency name with (as yet) no constraint.
            versions[token] = None
            current_dep = token
            current_ver = ""
        elif in_version:
            # Middle token of a multi-word constraint.
            current_ver = current_ver + " " + token

    return versions
141 | |||
def _print_trace(body, line):
    """
    Print the Environment of a Text Body
    (four lines of context either side of *line*, via the bb error log).
    """
    import bb

    # print the environment of the method
    bb.msg.error(bb.msg.domain.Util, "Printing the environment of the function")
    first = max(1, line - 4)
    last = min(line + 4, len(body) - 1)
    for lineno in range(first, last + 1):
        bb.msg.error(bb.msg.domain.Util, "\t%.4d:%s" % (lineno, body[lineno - 1]))
154 | |||
155 | |||
def better_compile(text, file, realfile):
    """
    A better compile method. This method
    will print the offending lines.
    """
    try:
        # Compile *text* as an exec-mode code object under the name *file*.
        return compile(text, file, "exec")
    except Exception, e:
        import bb,sys

        # split the text into lines again
        body = text.split('\n')
        # NOTE(review): realfile is passed as an extra positional argument
        # rather than interpolated into the message -- confirm bb.msg.error
        # accepts it, or that "...: %s" % realfile was intended.
        bb.msg.error(bb.msg.domain.Util, "Error in compiling python function in: ", realfile)
        bb.msg.error(bb.msg.domain.Util, "The lines resulting into this error were:")
        bb.msg.error(bb.msg.domain.Util, "\t%d:%s:'%s'" % (e.lineno, e.__class__.__name__, body[e.lineno-1]))

        # Show four lines of context around the failing line.
        _print_trace(body, e.lineno)

        # exit now
        sys.exit(1)
176 | |||
def better_exec(code, context, text, realfile):
    """
    Similar to better_compile, better_exec will
    print the lines that are responsible for the
    error.
    """
    import bb,sys
    try:
        exec code in context
    except:
        (t,value,tb) = sys.exc_info()

        # SkipPackage and FuncFailed are expected control-flow exceptions;
        # re-raise them untouched for the caller to handle.
        if t in [bb.parse.SkipPackage, bb.build.FuncFailed]:
            raise

        # print the Header of the Error Message
        bb.msg.error(bb.msg.domain.Util, "Error in executing python function in: %s" % realfile)
        bb.msg.error(bb.msg.domain.Util, "Exception:%s Message:%s" % (t,value) )

        # let us find the line number now
        # (walk to the innermost traceback frame, where the failure occurred)
        while tb.tb_next:
            tb = tb.tb_next

        import traceback
        line = traceback.tb_lineno(tb)

        # Show four lines of context around the failing line.
        _print_trace( text.split('\n'), line )

        raise
206 | |||
def Enum(*names):
    """
    A simple class to give Enum support

    Returns a singleton "enum type" instance whose attributes (one per
    name, in order) are comparable, hashable enum values. The container
    is iterable, indexable and sized; iteration yields the values in
    declaration order.
    """

    assert names, "Empty enums are not supported"

    class EnumClass(object):
        # The enum container. Slots are the member names, but the members
        # are attached as class attributes via setattr() below.
        __slots__ = names
        def __iter__(self): return iter(constants)
        def __len__(self): return len(constants)
        def __getitem__(self, i): return constants[i]
        def __repr__(self): return 'Enum' + str(names)
        def __str__(self): return 'enum ' + str(constants)

    class EnumValue(object):
        # One enum member wrapping its ordinal value. The single string in
        # __slots__ declares one slot; '__value' is name-mangled to
        # _EnumValue__value, matching the attribute assigned in __init__.
        __slots__ = ('__value')
        def __init__(self, value): self.__value = value
        Value = property(lambda self: self.__value)
        # EnumType closes over the variable bound after the loop below,
        # so every value reports the same singleton container.
        EnumType = property(lambda self: EnumType)
        def __hash__(self): return hash(self.__value)
        def __cmp__(self, other):
            # C fans might want to remove the following assertion
            # to make all enums comparable by ordinal value {;))
            assert self.EnumType is other.EnumType, "Only values from the same enum are comparable"
            return cmp(self.__value, other.__value)
        # ~value mirrors the ordinal around the midpoint of the enum.
        def __invert__(self): return constants[maximum - self.__value]
        # Python 2 truthiness hook: only the first member (ordinal 0) is falsy.
        def __nonzero__(self): return bool(self.__value)
        def __repr__(self): return str(names[self.__value])

    maximum = len(names) - 1
    constants = [None] * len(names)
    for i, each in enumerate(names):
        val = EnumValue(i)
        setattr(EnumClass, each, val)
        constants[i] = val
    constants = tuple(constants)
    EnumType = EnumClass()
    return EnumType
246 | |||
def lockfile(name):
    """
    Use the file fn as a lock file, return when the lock has been acquired.
    Returns a variable to pass to unlockfile().

    Exits the process if the directory that should contain the lock file
    does not exist.
    """
    path = os.path.dirname(name)
    if not os.path.isdir(path):
        import bb, sys
        bb.msg.error(bb.msg.domain.Util, "Error, lockfile path does not exist!: %s" % path)
        sys.exit(1)

    while True:
        # If we leave the lockfiles lying around there is no problem
        # but we should clean up after ourselves. This gives potential
        # for races though. To work around this, when we acquire the lock
        # we check the file we locked was still the lock file on disk.
        # by comparing inode numbers. If they don't match or the lockfile
        # no longer exists, we start again.

        # This implementation is unfair since the last person to request the
        # lock is the most likely to win it.

        try:
            lf = open(name, "a+")
            fcntl.flock(lf.fileno(), fcntl.LOCK_EX)
            statinfo = os.fstat(lf.fileno())
            if os.path.exists(lf.name):
                statinfo2 = os.stat(lf.name)
                if statinfo.st_ino == statinfo2.st_ino:
                    return lf
            # File no longer exists or changed, retry.
            # Bug fix: this was "lf.close" (an attribute access that never
            # called the method), leaking one file descriptor per retry.
            lf.close()
        except Exception:
            continue
281 | |||
def unlockfile(lf):
    """
    Unlock a file locked using lockfile()

    Removes the lock file from disk, releases the flock and closes the
    handle. Unlinking before unlocking lets waiters detect (via the inode
    check in lockfile()) that the file they locked is stale.
    """
    os.unlink(lf.name)
    fcntl.flock(lf.fileno(), fcntl.LOCK_UN)
    # Bug fix: this was "lf.close" (an attribute access that never called
    # the method), so the descriptor was leaked.
    lf.close()
289 | |||
def md5_file(filename):
    """
    Return the hex string representation of the MD5 checksum of filename.
    """
    try:
        import hashlib
        m = hashlib.md5()
    except ImportError:
        # Python < 2.5 has no hashlib; fall back to the old md5 module.
        import md5
        m = md5.new()

    # Read in binary mode so the digest is byte-exact regardless of
    # platform newline handling, and close the handle instead of
    # leaking it (the old code never closed the file).
    f = open(filename, "rb")
    try:
        for line in f:
            m.update(line)
    finally:
        f.close()
    return m.hexdigest()
304 | |||
def sha256_file(filename):
    """
    Return the hex string representation of the 256-bit SHA checksum of
    filename. On Python 2.4 this will return None, so callers will need to
    handle that by either skipping SHA checks, or running a standalone sha256sum
    binary.
    """
    try:
        import hashlib
    except ImportError:
        return None

    s = hashlib.sha256()
    # Read in binary mode so the digest is byte-exact regardless of
    # platform newline handling, and close the handle instead of
    # leaking it (the old code never closed the file).
    f = open(filename, "rb")
    try:
        for line in f:
            s.update(line)
    finally:
        f.close()
    return s.hexdigest()
321 | |||
def preserved_envvars_list():
    """
    Return the list of environment variable names that bitbake preserves
    by default when scrubbing the environment (see clean_environment()).
    """
    return """
        BBPATH
        BB_PRESERVE_ENV
        BB_ENV_WHITELIST
        BB_ENV_EXTRAWHITE
        COLORTERM
        DBUS_SESSION_BUS_ADDRESS
        DESKTOP_SESSION
        DESKTOP_STARTUP_ID
        DISPLAY
        GNOME_KEYRING_PID
        GNOME_KEYRING_SOCKET
        GPG_AGENT_INFO
        GTK_RC_FILES
        HOME
        LANG
        LOGNAME
        PATH
        PWD
        SESSION_MANAGER
        SHELL
        SSH_AUTH_SOCK
        TERM
        USER
        USERNAME
        _
        XAUTHORITY
        XDG_DATA_DIRS
        XDG_SESSION_COOKIE
    """.split()
353 | |||
def filter_environment(good_vars):
    """
    Create a pristine environment for bitbake. This will remove variables that
    are not known and may influence the build in a negative way.

    good_vars: names allowed to survive. Returns the list of names removed.
    """

    import bb

    removed_vars = []
    for key in os.environ.keys():
        if key not in good_vars:
            removed_vars.append(key)
            # Clear both the C-level environment and Python's copy.
            os.unsetenv(key)
            del os.environ[key]

    if removed_vars:
        bb.debug(1, "Removed the following variables from the environment:", ",".join(removed_vars))

    return removed_vars
375 | |||
def clean_environment():
    """
    Clean up any spurious environment variables. This will remove any
    variables the user hasn't chose to preserve.

    Honoured variables: BB_PRESERVE_ENV disables filtering entirely,
    BB_ENV_WHITELIST replaces the default preserve list, and
    BB_ENV_EXTRAWHITE appends extra names to it.
    """
    if 'BB_PRESERVE_ENV' in os.environ:
        return
    whitelist = os.environ.get('BB_ENV_WHITELIST')
    if whitelist is not None:
        good_vars = whitelist.split()
    else:
        good_vars = preserved_envvars_list()
    extra = os.environ.get('BB_ENV_EXTRAWHITE')
    if extra is not None:
        good_vars.extend(extra.split())
    filter_environment(good_vars)
389 | |||
def empty_environment():
    """
    Remove all variables from the environment.
    """
    # Snapshot the names first: deleting entries while iterating the live
    # keys() of os.environ is fragile (and breaks outright on interpreters
    # where keys() is a view rather than a copy).
    for s in list(os.environ.keys()):
        # Clear both the C-level environment and Python's copy.
        os.unsetenv(s)
        del os.environ[s]
397 | |||
def build_environment(d):
    """
    Build an environment from all exported variables.

    d: a bb datastore; every variable carrying the "export" flag is
    copied (expanded) into os.environ.
    """
    import bb
    for var in bb.data.keys(d):
        if bb.data.getVarFlag(var, "export", d):
            os.environ[var] = bb.data.getVar(var, d, True)
407 | |||
def prunedir(topdir):
    """
    Delete everything reachable from the directory named in 'topdir',
    then the directory itself. CAUTION: This is dangerous!
    """
    # Bottom-up walk so directories are already empty when removed.
    for root, dirs, files in os.walk(topdir, topdown=False):
        for entry in files:
            os.remove(os.path.join(root, entry))
        for entry in dirs:
            path = os.path.join(root, entry)
            # Symlinks to directories appear in 'dirs' but must be
            # unlinked, not rmdir'ed.
            if os.path.islink(path):
                os.remove(path)
            else:
                os.rmdir(path)
    os.rmdir(topdir)
420 | |||
421 | # | ||
422 | # Could also use return re.compile("(%s)" % "|".join(map(re.escape, suffixes))).sub(lambda mo: "", var) | ||
# but that's possibly insane and suffixes is probably going to be small
424 | # | ||
def prune_suffix(var, suffixes, d):
    # See if var ends with any of the suffixes listed and
    # remove it if found
    for suffix in suffixes:
        if suffix and var.endswith(suffix):
            # Bug fix: strip only the trailing suffix. The old
            # var.replace(suffix, "") also removed every earlier
            # occurrence inside the string (e.g. "a-native-b-native"
            # became "a-b" instead of "a-native-b"). The 'suffix and'
            # guard also keeps an empty suffix from emptying var.
            return var[:-len(suffix)]
    return var